Search is not available for this dataset
repo
stringlengths 2
152
⌀ | file
stringlengths 15
239
| code
stringlengths 0
58.4M
| file_length
int64 0
58.4M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 364
values |
---|---|---|---|---|---|---|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/modal.service.spec.ts
|
import { Component } from '@angular/core';
import { fakeAsync, TestBed, tick } from '@angular/core/testing';
import { NgbActiveModal, NgbModal, NgbModalModule } from '@ng-bootstrap/ng-bootstrap';
import { configureTestBed } from '~/testing/unit-test-helper';
import { ModalService } from './modal.service';
// Minimal modal-content stand-in: exposes a writable `foo` property (so the
// initial-state copying of ModalService.show() can be observed) and injects
// NgbActiveModal like a real modal component would.
@Component({
  template: ``
})
class MockComponent {
  // Overwritten by ModalService.show() via the initialState argument.
  foo = '';
  constructor(public activeModal: NgbActiveModal) {}
}
describe('ModalService', () => {
  let service: ModalService;
  let ngbModal: NgbModal;

  configureTestBed({ declarations: [MockComponent], imports: [NgbModalModule] }, [MockComponent]);

  beforeEach(() => {
    service = TestBed.inject(ModalService);
    ngbModal = TestBed.inject(NgbModal);
  });

  it('should be created', () => {
    expect(service).toBeTruthy();
  });

  it('should call NgbModal.open when show is called', () => {
    spyOn(ngbModal, 'open').and.callThrough();
    // Fixed typo: local was previously misspelled as `modaRef`.
    const modalRef = service.show(MockComponent, { foo: 'bar' });
    expect(ngbModal.open).toBeCalled();
    // The initial state must be copied onto the created component instance.
    expect(modalRef.componentInstance.foo).toBe('bar');
    expect(modalRef.componentInstance.activeModal).toBeTruthy();
  });

  it('should call dismissAll and hasOpenModals', fakeAsync(() => {
    spyOn(ngbModal, 'dismissAll').and.callThrough();
    spyOn(ngbModal, 'hasOpenModals').and.callThrough();
    expect(ngbModal.hasOpenModals()).toBeFalsy();
    service.show(MockComponent, { foo: 'bar' });
    expect(service.hasOpenModals()).toBeTruthy();
    service.dismissAll();
    tick();
    expect(service.hasOpenModals()).toBeFalsy();
    expect(ngbModal.dismissAll).toBeCalled();
    expect(ngbModal.hasOpenModals).toBeCalled();
  }));
});
| 1,673 | 26.9 | 98 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/modal.service.ts
|
import { Injectable } from '@angular/core';
import { NgbModal, NgbModalOptions, NgbModalRef } from '@ng-bootstrap/ng-bootstrap';
@Injectable({
  providedIn: 'root'
})
export class ModalService {
  constructor(private modal: NgbModal) {}

  /**
   * Open a modal dialog rendering the given component.
   *
   * @param component The component type to render inside the modal.
   * @param initialState Optional key/value pairs copied onto the created
   *   component instance before it is displayed.
   * @param options Optional NgbModal options, forwarded unchanged.
   * @returns The reference of the newly opened modal.
   */
  show(component: any, initialState?: any, options?: NgbModalOptions): NgbModalRef {
    const ref = this.modal.open(component, options);
    if (initialState) {
      Object.assign(ref.componentInstance, initialState);
    }
    return ref;
  }

  /** Dismiss all currently displayed modals. */
  dismissAll() {
    this.modal.dismissAll();
  }

  /** @returns `true` if at least one modal is currently open. */
  hasOpenModals() {
    return this.modal.hasOpenModals();
  }
}
| 620 | 20.413793 | 84 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/module-status-guard.service.spec.ts
|
import { HttpClient } from '@angular/common/http';
import { Component, NgZone } from '@angular/core';
import { fakeAsync, TestBed, tick } from '@angular/core/testing';
import { ActivatedRouteSnapshot, Router, Routes } from '@angular/router';
import { RouterTestingModule } from '@angular/router/testing';
import { of as observableOf, throwError } from 'rxjs';
import { configureTestBed } from '~/testing/unit-test-helper';
import { MgrModuleService } from '../api/mgr-module.service';
import { ModuleStatusGuardService } from './module-status-guard.service';
describe('ModuleStatusGuardService', () => {
  let service: ModuleStatusGuardService;
  let httpClient: HttpClient;
  let router: Router;
  let route: ActivatedRouteSnapshot;
  let ngZone: NgZone;
  let mgrModuleService: MgrModuleService;

  @Component({ selector: 'cd-foo', template: '' })
  class FooComponent {}

  const fakeService = {
    get: () => true
  };

  const routes: Routes = [{ path: '**', component: FooComponent }];

  /**
   * Run the guard once and verify both its emitted result and the URL the
   * router ends up at.
   *
   * @param getResult Mocked '/ui-api/<uiApiPath>/status' response body
   *   (typed `object | null` instead of the overly loose `{}`).
   * @param activateResult Value the guard is expected to emit.
   * @param urlResult Router URL expected after the guard has run.
   * @param backend Orchestrator backend reported by the mgr module config.
   * @param configOptPermission Whether reading the config succeeds; a failure
   *   makes the guard redirect to `redirectTo`.
   */
  const testCanActivate = (
    getResult: object | null,
    activateResult: boolean,
    urlResult: string,
    backend = 'cephadm',
    configOptPermission = true
  ) => {
    let result: boolean;
    spyOn(httpClient, 'get').and.returnValue(observableOf(getResult));
    const orchBackend = { orchestrator: backend };
    const getConfigSpy = spyOn(mgrModuleService, 'getConfig');
    configOptPermission
      ? getConfigSpy.and.returnValue(observableOf(orchBackend))
      : getConfigSpy.and.returnValue(throwError({}));
    ngZone.run(() => {
      service.canActivateChild(route).subscribe((resp) => {
        result = resp;
      });
    });
    tick();
    expect(result).toBe(activateResult);
    expect(router.url).toBe(urlResult);
  };

  configureTestBed({
    imports: [RouterTestingModule.withRoutes(routes)],
    providers: [ModuleStatusGuardService, { provide: HttpClient, useValue: fakeService }],
    declarations: [FooComponent]
  });

  beforeEach(() => {
    service = TestBed.inject(ModuleStatusGuardService);
    httpClient = TestBed.inject(HttpClient);
    mgrModuleService = TestBed.inject(MgrModuleService);
    router = TestBed.inject(Router);
    route = new ActivatedRouteSnapshot();
    route.url = [];
    route.data = {
      moduleStatusGuardConfig: {
        uiApiPath: 'bar',
        redirectTo: '/foo',
        backend: 'rook'
      }
    };
    ngZone = TestBed.inject(NgZone);
  });

  it('should be created', () => {
    expect(service).toBeTruthy();
  });

  it('should test canActivate with status available', fakeAsync(() => {
    route.data.moduleStatusGuardConfig.redirectTo = 'foo';
    testCanActivate({ available: true, message: 'foo' }, true, '/');
  }));

  it('should test canActivateChild with status unavailable', fakeAsync(() => {
    testCanActivate({ available: false, message: null }, false, '/foo');
  }));

  // Renamed: this spec previously had the exact same title as the one above,
  // which hides one of the two results in most reporters.
  it('should test canActivateChild with no status response', fakeAsync(() => {
    testCanActivate(null, false, '/foo');
  }));

  it('should redirect normally if the backend provided matches the current backend', fakeAsync(() => {
    testCanActivate({ available: true, message: 'foo' }, true, '/', 'rook');
  }));

  it('should redirect to the "redirectTo" link for user without sufficient permission', fakeAsync(() => {
    testCanActivate({ available: true, message: 'foo' }, true, '/foo', 'rook', false);
  }));
});
| 3,375 | 31.776699 | 105 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/module-status-guard.service.ts
|
import { HttpClient } from '@angular/common/http';
import { Injectable } from '@angular/core';
import { ActivatedRouteSnapshot, CanActivate, CanActivateChild, Router } from '@angular/router';
import { of as observableOf } from 'rxjs';
import { catchError, concatMap, map } from 'rxjs/operators';
import { MgrModuleService } from '~/app/shared/api/mgr-module.service';
import { Icons } from '~/app/shared/enum/icons.enum';
/**
* This service checks if a route can be activated by executing a
* REST API call to '/ui-api/<uiApiPath>/status'. If the returned response
* states that the module is not available, then the user is redirected
* to the specified <redirectTo> URL path.
*
* A controller implementing this endpoint should return an object of
* the following form:
* {'available': true|false, 'message': null|string}.
*
* The configuration of this guard should look like this:
* const routes: Routes = [
* {
* path: 'rgw/bucket',
* component: RgwBucketListComponent,
* canActivate: [AuthGuardService, ModuleStatusGuardService],
* data: {
* moduleStatusGuardConfig: {
* uiApiPath: 'rgw',
* redirectTo: 'rgw/501'
* }
* }
* },
* ...
*/
@Injectable({
providedIn: 'root'
})
export class ModuleStatusGuardService implements CanActivate, CanActivateChild {
// TODO: Hotfix - remove ALLOWLIST'ing when a generic ErrorComponent is implemented
static readonly ALLOWLIST: string[] = ['501'];
constructor(
private http: HttpClient,
private router: Router,
private mgrModuleService: MgrModuleService
) {}
canActivate(route: ActivatedRouteSnapshot) {
return this.doCheck(route);
}
canActivateChild(childRoute: ActivatedRouteSnapshot) {
return this.doCheck(childRoute);
}
private doCheck(route: ActivatedRouteSnapshot) {
if (route.url.length > 0 && ModuleStatusGuardService.ALLOWLIST.includes(route.url[0].path)) {
return observableOf(true);
}
const config = route.data['moduleStatusGuardConfig'];
let backendCheck = false;
if (config.backend) {
this.mgrModuleService.getConfig('orchestrator').subscribe(
(resp) => {
backendCheck = config.backend === resp['orchestrator'];
},
() => {
this.router.navigate([config.redirectTo]);
return observableOf(false);
}
);
}
return this.http.get(`ui-api/${config.uiApiPath}/status`).pipe(
map((resp: any) => {
if (!resp.available && !backendCheck) {
this.router.navigate([config.redirectTo || ''], {
state: {
header: config.header,
message: resp.message,
section: config.section,
section_info: config.section_info,
button_name: config.button_name,
button_route: config.button_route,
button_title: config.button_title,
secondary_button_name: config.secondary_button_name,
secondary_button_route: config.secondary_button_route,
secondary_button_title: config.secondary_button_title,
uiConfig: config.uiConfig,
uiApiPath: config.uiApiPath,
icon: Icons.wrench,
component: config.component
}
});
}
return resp.available;
}),
catchError(() => {
this.router.navigate([config.redirectTo]);
return observableOf(false);
})
);
}
}
| 3,456 | 31.92381 | 97 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/motd-notification.service.spec.ts
|
import { HttpClientTestingModule } from '@angular/common/http/testing';
import { TestBed } from '@angular/core/testing';
import { Motd } from '~/app/shared/api/motd.service';
import { configureTestBed } from '~/testing/unit-test-helper';
import { MotdNotificationService } from './motd-notification.service';
describe('MotdNotificationService', () => {
  let service: MotdNotificationService;
  configureTestBed({
    providers: [MotdNotificationService],
    imports: [HttpClientTestingModule]
  });
  beforeEach(() => {
    service = TestBed.inject(MotdNotificationService);
  });
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
  // 'info' severity: the hidden marker goes to localStorage (persists across
  // sessions) and any sessionStorage marker is cleared.
  it('should hide [1]', () => {
    spyOn(service.motdSource, 'next');
    spyOn(service.motdSource, 'getValue').and.returnValue({
      severity: 'info',
      expires: '',
      message: 'foo',
      md5: 'acbd18db4cc2f85cedef654fccc4a4d8'
    });
    service.hide();
    expect(localStorage.getItem('dashboard_motd_hidden')).toBe(
      'info:acbd18db4cc2f85cedef654fccc4a4d8'
    );
    expect(sessionStorage.getItem('dashboard_motd_hidden')).toBeNull();
    expect(service.motdSource.next).toBeCalledWith(null);
  });
  // 'warning' severity: the marker goes to sessionStorage only (hidden for the
  // current session), and localStorage is cleared.
  it('should hide [2]', () => {
    spyOn(service.motdSource, 'getValue').and.returnValue({
      severity: 'warning',
      expires: '',
      message: 'bar',
      md5: '37b51d194a7513e45b56f6524f2d51f2'
    });
    service.hide();
    expect(sessionStorage.getItem('dashboard_motd_hidden')).toBe(
      'warning:37b51d194a7513e45b56f6524f2d51f2'
    );
    expect(localStorage.getItem('dashboard_motd_hidden')).toBeNull();
  });
  // No hidden marker present: the MOTD is forwarded to subscribers unchanged.
  it('should process response [1]', () => {
    const motd: Motd = {
      severity: 'danger',
      expires: '',
      message: 'foo',
      md5: 'acbd18db4cc2f85cedef654fccc4a4d8'
    };
    spyOn(service.motdSource, 'next');
    service.processResponse(motd);
    expect(service.motdSource.next).toBeCalledWith(motd);
  });
  // Stored marker does not match the incoming MOTD's severity: both storage
  // entries are cleared so the MOTD becomes visible again.
  it('should process response [2]', () => {
    const motd: Motd = {
      severity: 'warning',
      expires: '',
      message: 'foo',
      md5: 'acbd18db4cc2f85cedef654fccc4a4d8'
    };
    localStorage.setItem('dashboard_motd_hidden', 'info');
    service.processResponse(motd);
    expect(sessionStorage.getItem('dashboard_motd_hidden')).toBeNull();
    expect(localStorage.getItem('dashboard_motd_hidden')).toBeNull();
  });
  // Marker matches severity and MD5 exactly: the MOTD stays hidden.
  it('should process response [3]', () => {
    const motd: Motd = {
      severity: 'info',
      expires: '',
      message: 'foo',
      md5: 'acbd18db4cc2f85cedef654fccc4a4d8'
    };
    spyOn(service.motdSource, 'next');
    localStorage.setItem('dashboard_motd_hidden', 'info:acbd18db4cc2f85cedef654fccc4a4d8');
    service.processResponse(motd);
    expect(service.motdSource.next).not.toBeCalled();
  });
  // Same severity but different MD5 (message changed): show the MOTD again.
  it('should process response [4]', () => {
    const motd: Motd = {
      severity: 'info',
      expires: '',
      message: 'foo',
      md5: 'acbd18db4cc2f85cedef654fccc4a4d8'
    };
    spyOn(service.motdSource, 'next');
    localStorage.setItem('dashboard_motd_hidden', 'info:37b51d194a7513e45b56f6524f2d51f2');
    service.processResponse(motd);
    expect(service.motdSource.next).toBeCalled();
  });
  // Same MD5 but different severity: show the MOTD again.
  it('should process response [5]', () => {
    const motd: Motd = {
      severity: 'info',
      expires: '',
      message: 'foo',
      md5: 'acbd18db4cc2f85cedef654fccc4a4d8'
    };
    spyOn(service.motdSource, 'next');
    localStorage.setItem('dashboard_motd_hidden', 'danger:acbd18db4cc2f85cedef654fccc4a4d8');
    service.processResponse(motd);
    expect(service.motdSource.next).toBeCalled();
  });
});
| 3,620 | 29.686441 | 93 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/motd-notification.service.ts
|
import { Injectable, OnDestroy } from '@angular/core';
import * as _ from 'lodash';
import { BehaviorSubject, EMPTY, Observable, of, Subscription } from 'rxjs';
import { catchError, delay, mergeMap, repeat, tap } from 'rxjs/operators';
import { Motd, MotdService } from '~/app/shared/api/motd.service';
import { whenPageVisible } from '../rxjs/operators/page-visibilty.operator';
@Injectable({
  providedIn: 'root'
})
export class MotdNotificationService implements OnDestroy {
  public motd$: Observable<Motd | null>;
  public motdSource = new BehaviorSubject<Motd | null>(null);
  private subscription: Subscription;
  private localStorageKey = 'dashboard_motd_hidden';

  constructor(private motdService: MotdService) {
    this.motd$ = this.motdSource.asObservable();
    // Poll the backend every 60 seconds for the latest MOTD configuration,
    // but only while the page is visible.
    this.subscription = of(true)
      .pipe(
        mergeMap(() => this.motdService.get()),
        catchError((error) => {
          // Suppress the default error notification for this background call.
          if (_.isFunction(error.preventDefault)) {
            error.preventDefault();
          }
          return EMPTY;
        }),
        tap((motd: Motd | null) => this.processResponse(motd)),
        delay(60000),
        repeat(),
        whenPageVisible()
      )
      .subscribe();
  }

  ngOnDestroy(): void {
    this.subscription.unsubscribe();
  }

  /**
   * Hide the currently displayed MOTD. Its severity and MD5 are remembered in
   * local (info) or session (warning) storage so that it can be shown again
   * once the severity or message of a later MOTD differs.
   */
  hide() {
    const current: Motd = this.motdSource.getValue();
    if (current) {
      const fingerprint = `${current.severity}:${current.md5}`;
      if (current.severity === 'info') {
        localStorage.setItem(this.localStorageKey, fingerprint);
        sessionStorage.removeItem(this.localStorageKey);
      } else if (current.severity === 'warning') {
        sessionStorage.setItem(this.localStorageKey, fingerprint);
        localStorage.removeItem(this.localStorageKey);
      }
    }
    this.motdSource.next(null);
  }

  /**
   * Decide whether the given MOTD should be shown, taking a previously stored
   * "hidden" fingerprint into account.
   */
  processResponse(motd: Motd | null) {
    const hidden: string | null =
      sessionStorage.getItem(this.localStorageKey) || localStorage.getItem(this.localStorageKey);
    let show: boolean = _.isNull(hidden);
    // A hidden MOTD must reappear when its severity or message has changed.
    if (!show && motd) {
      const [hiddenSeverity, hiddenMd5] = hidden.split(':');
      if (hiddenSeverity !== motd.severity || hiddenMd5 !== motd.md5) {
        show = true;
        sessionStorage.removeItem(this.localStorageKey);
        localStorage.removeItem(this.localStorageKey);
      }
    }
    if (show) {
      this.motdSource.next(motd);
    }
  }
}
| 2,756 | 31.435294 | 97 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/ngzone-scheduler.service.ts
|
import { Injectable, NgZone } from '@angular/core';
import { asyncScheduler, SchedulerLike, Subscription } from 'rxjs';
// Base class for schedulers that delegate to the RxJS async scheduler while
// controlling in which Angular zone the scheduled work is executed.
abstract class NgZoneScheduler implements SchedulerLike {
  protected scheduler = asyncScheduler;
  constructor(protected zone: NgZone) {}
  // Subclasses decide whether work runs inside or outside the Angular zone.
  abstract schedule(...args: any[]): Subscription;
  // Current time as reported by the wrapped scheduler.
  now(): number {
    return this.scheduler.now();
  }
}
@Injectable({
  providedIn: 'root'
})
export class LeaveNgZoneScheduler extends NgZoneScheduler {
  constructor(zone: NgZone) {
    super(zone);
  }

  /**
   * Schedule work outside the Angular zone so it does not trigger change
   * detection.
   */
  schedule(...args: any[]): Subscription {
    return this.zone.runOutsideAngular(() => this.scheduler.schedule(...args));
  }
}
@Injectable({
  providedIn: 'root'
})
export class EnterNgZoneScheduler extends NgZoneScheduler {
  constructor(zone: NgZone) {
    super(zone);
  }

  /**
   * Schedule work inside the Angular zone so change detection picks up its
   * effects.
   */
  schedule(...args: any[]): Subscription {
    return this.zone.run(() => this.scheduler.schedule(...args));
  }
}
// Convenience provider bundling both zone-aware schedulers: `leave` runs
// scheduled work outside the Angular zone, `enter` runs it inside.
@Injectable({
  providedIn: 'root'
})
export class NgZoneSchedulerService {
  constructor(public leave: LeaveNgZoneScheduler, public enter: EnterNgZoneScheduler) {}
}
| 1,124 | 21.959184 | 98 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/no-sso-guard.service.spec.ts
|
import { Component, NgZone } from '@angular/core';
import { fakeAsync, TestBed, tick } from '@angular/core/testing';
import { Routes } from '@angular/router';
import { RouterTestingModule } from '@angular/router/testing';
import { DashboardUserDeniedError } from '~/app/core/error/error';
import { configureTestBed } from '~/testing/unit-test-helper';
import { AuthStorageService } from './auth-storage.service';
import { NoSsoGuardService } from './no-sso-guard.service';
describe('NoSsoGuardService', () => {
  let service: NoSsoGuardService;
  let authStorageService: AuthStorageService;
  let ngZone: NgZone;
  // Target for the guard's potential redirect route.
  @Component({ selector: 'cd-404', template: '' })
  class NotFoundComponent {}
  const routes: Routes = [{ path: '404', component: NotFoundComponent }];
  configureTestBed({
    imports: [RouterTestingModule.withRoutes(routes)],
    providers: [NoSsoGuardService, AuthStorageService],
    declarations: [NotFoundComponent]
  });
  beforeEach(() => {
    service = TestBed.inject(NoSsoGuardService);
    authStorageService = TestBed.inject(AuthStorageService);
    ngZone = TestBed.inject(NgZone);
  });
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
  it('should allow if not logged in via SSO', () => {
    spyOn(authStorageService, 'isSSO').and.returnValue(false);
    expect(service.canActivate()).toBe(true);
  });
  // SSO users are rejected by throwing, not by returning false.
  it('should prevent if logged in via SSO', fakeAsync(() => {
    spyOn(authStorageService, 'isSSO').and.returnValue(true);
    ngZone.run(() => {
      expect(() => service.canActivate()).toThrowError(DashboardUserDeniedError);
    });
    tick();
  }));
});
| 1,629 | 31.6 | 81 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/no-sso-guard.service.ts
|
import { Injectable } from '@angular/core';
import { CanActivate, CanActivateChild } from '@angular/router';
import { DashboardUserDeniedError } from '~/app/core/error/error';
import { AuthStorageService } from './auth-storage.service';
/**
* This service checks if a route can be activated if the user has not
* been logged in via SSO.
*/
@Injectable({
  providedIn: 'root'
})
export class NoSsoGuardService implements CanActivate, CanActivateChild {
  constructor(private authStorageService: AuthStorageService) {}

  /**
   * Allow activation only when the user is NOT logged in via SSO.
   *
   * @returns `true` if the user is not an SSO user.
   * @throws DashboardUserDeniedError if the user is logged in via SSO.
   */
  canActivate() {
    if (!this.authStorageService.isSSO()) {
      return true;
    }
    // Fix: removed the unreachable `return false;` that followed this throw.
    throw new DashboardUserDeniedError();
  }

  canActivateChild(): boolean {
    return this.canActivate();
  }
}
| 744 | 24.689655 | 73 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/notification.service.spec.ts
|
import { HttpClientTestingModule } from '@angular/common/http/testing';
import { fakeAsync, TestBed, tick } from '@angular/core/testing';
import _ from 'lodash';
import { ToastrService } from 'ngx-toastr';
import { configureTestBed } from '~/testing/unit-test-helper';
import { RbdService } from '../api/rbd.service';
import { NotificationType } from '../enum/notification-type.enum';
import { CdNotificationConfig } from '../models/cd-notification';
import { FinishedTask } from '../models/finished-task';
import { CdDatePipe } from '../pipes/cd-date.pipe';
import { NotificationService } from './notification.service';
import { TaskMessageService } from './task-message.service';
describe('NotificationService', () => {
let service: NotificationService;
const toastFakeService = {
error: () => true,
info: () => true,
success: () => true
};
configureTestBed({
providers: [
NotificationService,
TaskMessageService,
{ provide: ToastrService, useValue: toastFakeService },
{ provide: CdDatePipe, useValue: { transform: (d: any) => d } },
RbdService
],
imports: [HttpClientTestingModule]
});
beforeEach(() => {
service = TestBed.inject(NotificationService);
service.removeAll();
});
it('should be created', () => {
expect(service).toBeTruthy();
});
it('should read empty notification list', () => {
localStorage.setItem('cdNotifications', '[]');
expect(service['dataSource'].getValue()).toEqual([]);
});
it('should read old notifications', fakeAsync(() => {
localStorage.setItem(
'cdNotifications',
'[{"type":2,"message":"foobar","timestamp":"2018-05-24T09:41:32.726Z"}]'
);
service = new NotificationService(null, null, null);
expect(service['dataSource'].getValue().length).toBe(1);
}));
it('should cancel a notification', fakeAsync(() => {
const timeoutId = service.show(NotificationType.error, 'Simple test');
service.cancel(timeoutId);
tick(5000);
expect(service['dataSource'].getValue().length).toBe(0);
}));
describe('Saved notifications', () => {
const expectSavedNotificationToHave = (expected: object) => {
tick(510);
expect(service['dataSource'].getValue().length).toBe(1);
const notification = service['dataSource'].getValue()[0];
Object.keys(expected).forEach((key) => {
expect(notification[key]).toBe(expected[key]);
});
};
const addNotifications = (quantity: number) => {
for (let index = 0; index < quantity; index++) {
service.show(NotificationType.info, `${index}`);
tick(510);
}
};
beforeEach(() => {
spyOn(service, 'show').and.callThrough();
service.cancel((<any>service)['justShownTimeoutId']);
});
it('should create a success notification and save it', fakeAsync(() => {
service.show(new CdNotificationConfig(NotificationType.success, 'Simple test'));
expectSavedNotificationToHave({ type: NotificationType.success });
}));
it('should create an error notification and save it', fakeAsync(() => {
service.show(NotificationType.error, 'Simple test');
expectSavedNotificationToHave({ type: NotificationType.error });
}));
it('should create an info notification and save it', fakeAsync(() => {
service.show(new CdNotificationConfig(NotificationType.info, 'Simple test'));
expectSavedNotificationToHave({
type: NotificationType.info,
title: 'Simple test',
message: undefined
});
}));
it('should never have more then 10 notifications', fakeAsync(() => {
addNotifications(15);
expect(service['dataSource'].getValue().length).toBe(10);
}));
it('should show a success task notification, but not save it', fakeAsync(() => {
const task = _.assign(new FinishedTask(), {
success: true
});
service.notifyTask(task, true);
tick(1500);
expect(service.show).toHaveBeenCalled();
const notifications = service['dataSource'].getValue();
expect(notifications.length).toBe(0);
}));
it('should be able to stop notifyTask from notifying', fakeAsync(() => {
const task = _.assign(new FinishedTask(), {
success: true
});
const timeoutId = service.notifyTask(task, true);
service.cancel(timeoutId);
tick(100);
expect(service['dataSource'].getValue().length).toBe(0);
}));
it('should show a error task notification', fakeAsync(() => {
const task = _.assign(
new FinishedTask('rbd/create', {
pool_name: 'somePool',
image_name: 'someImage'
}),
{
success: false,
exception: {
code: 17
}
}
);
service.notifyTask(task);
tick(1500);
expect(service.show).toHaveBeenCalled();
const notifications = service['dataSource'].getValue();
expect(notifications.length).toBe(0);
}));
it('combines different notifications with the same title', fakeAsync(() => {
service.show(NotificationType.error, '502 - Bad Gateway', 'Error occurred in path a');
tick(60);
service.show(NotificationType.error, '502 - Bad Gateway', 'Error occurred in path b');
expectSavedNotificationToHave({
type: NotificationType.error,
title: '502 - Bad Gateway',
message: '<ul><li>Error occurred in path a</li><li>Error occurred in path b</li></ul>'
});
}));
it('should remove a single notification', fakeAsync(() => {
addNotifications(5);
let messages = service['dataSource'].getValue().map((notification) => notification.title);
expect(messages).toEqual(['4', '3', '2', '1', '0']);
service.remove(2);
messages = service['dataSource'].getValue().map((notification) => notification.title);
expect(messages).toEqual(['4', '3', '1', '0']);
}));
it('should remove all notifications', fakeAsync(() => {
addNotifications(5);
expect(service['dataSource'].getValue().length).toBe(5);
service.removeAll();
expect(service['dataSource'].getValue().length).toBe(0);
}));
});
describe('notification queue', () => {
const n1 = new CdNotificationConfig(NotificationType.success, 'Some success');
const n2 = new CdNotificationConfig(NotificationType.info, 'Some info');
const showArray = (arr: any[]) => arr.forEach((n) => service.show(n));
beforeEach(() => {
spyOn(service, 'save').and.stub();
});
it('filters out duplicated notifications on single call', fakeAsync(() => {
showArray([n1, n1, n2, n2]);
tick(510);
expect(service.save).toHaveBeenCalledTimes(2);
}));
it('filters out duplicated notifications presented in different calls', fakeAsync(() => {
showArray([n1, n2]);
showArray([n1, n2]);
tick(1000);
expect(service.save).toHaveBeenCalledTimes(2);
}));
it('will reset the timeout on every call', fakeAsync(() => {
showArray([n1, n2]);
tick(490);
showArray([n1, n2]);
tick(450);
expect(service.save).toHaveBeenCalledTimes(0);
tick(60);
expect(service.save).toHaveBeenCalledTimes(2);
}));
it('wont filter out duplicated notifications if timeout was reached before', fakeAsync(() => {
showArray([n1, n2]);
tick(510);
showArray([n1, n2]);
tick(510);
expect(service.save).toHaveBeenCalledTimes(4);
}));
});
describe('showToasty', () => {
let toastr: ToastrService;
const time = '2022-02-22T00:00:00.000Z';
beforeEach(() => {
const baseTime = new Date(time);
spyOn(global, 'Date').and.returnValue(baseTime);
spyOn(window, 'setTimeout').and.callFake((fn) => fn());
toastr = TestBed.inject(ToastrService);
// spyOn needs to know the methods before spying and can't read the array for clarification
['error', 'info', 'success'].forEach((method: 'error' | 'info' | 'success') =>
spyOn(toastr, method).and.stub()
);
});
it('should show with only title defined', () => {
service.show(NotificationType.info, 'Some info');
expect(toastr.info).toHaveBeenCalledWith(
`<small class="date">${time}</small>` +
'<i class="float-end custom-icon ceph-icon" title="Ceph"></i>',
'Some info',
undefined
);
});
it('should show with title and message defined', () => {
service.show(
() =>
new CdNotificationConfig(NotificationType.error, 'Some error', 'Some operation failed')
);
expect(toastr.error).toHaveBeenCalledWith(
'Some operation failed<br>' +
`<small class="date">${time}</small>` +
'<i class="float-end custom-icon ceph-icon" title="Ceph"></i>',
'Some error',
undefined
);
});
it('should show with title, message and application defined', () => {
service.show(
new CdNotificationConfig(
NotificationType.success,
'Alert resolved',
'Some alert resolved',
undefined,
'Prometheus'
)
);
expect(toastr.success).toHaveBeenCalledWith(
'Some alert resolved<br>' +
`<small class="date">${time}</small>` +
'<i class="float-end custom-icon prometheus-icon" title="Prometheus"></i>',
'Alert resolved',
undefined
);
});
});
});
| 9,478 | 32.143357 | 98 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/notification.service.ts
|
import { Injectable } from '@angular/core';
import _ from 'lodash';
import { IndividualConfig, ToastrService } from 'ngx-toastr';
import { BehaviorSubject, Subject } from 'rxjs';
import { NotificationType } from '../enum/notification-type.enum';
import { CdNotification, CdNotificationConfig } from '../models/cd-notification';
import { FinishedTask } from '../models/finished-task';
import { CdDatePipe } from '../pipes/cd-date.pipe';
import { TaskMessageService } from './task-message.service';
@Injectable({
providedIn: 'root'
})
export class NotificationService {
private hideToasties = false;
// Data observable
private dataSource = new BehaviorSubject<CdNotification[]>([]);
data$ = this.dataSource.asObservable();
// Sidebar observable
sidebarSubject = new Subject();
private queued: CdNotificationConfig[] = [];
private queuedTimeoutId: number;
KEY = 'cdNotifications';
constructor(
public toastr: ToastrService,
private taskMessageService: TaskMessageService,
private cdDatePipe: CdDatePipe
) {
const stringNotifications = localStorage.getItem(this.KEY);
let notifications: CdNotification[] = [];
if (_.isString(stringNotifications)) {
notifications = JSON.parse(stringNotifications, (_key, value) => {
if (_.isPlainObject(value)) {
return _.assign(new CdNotification(), value);
}
return value;
});
}
this.dataSource.next(notifications);
}
/**
* Removes all current saved notifications
*/
removeAll() {
localStorage.removeItem(this.KEY);
this.dataSource.next([]);
}
/**
* Removes a single saved notifications
*/
remove(index: number) {
const recent = this.dataSource.getValue();
recent.splice(index, 1);
this.dataSource.next(recent);
localStorage.setItem(this.KEY, JSON.stringify(recent));
}
/**
* Method used for saving a shown notification (check show() method).
*/
save(notification: CdNotification) {
const recent = this.dataSource.getValue();
recent.push(notification);
recent.sort((a, b) => (a.timestamp > b.timestamp ? -1 : 1));
while (recent.length > 10) {
recent.pop();
}
this.dataSource.next(recent);
localStorage.setItem(this.KEY, JSON.stringify(recent));
}
/**
* Method for showing a notification.
* @param {NotificationType} type toastr type
* @param {string} title
* @param {string} [message] The message to be displayed. Note, use this field
* for error notifications only.
* @param {*} [options] toastr compatible options, used when creating a toastr
* @param {string} [application] Only needed if notification comes from an external application
* @returns The timeout ID that is set to be able to cancel the notification.
*/
show(
type: NotificationType,
title: string,
message?: string,
options?: any | IndividualConfig,
application?: string
): number;
show(config: CdNotificationConfig | (() => CdNotificationConfig)): number;
show(
arg: NotificationType | CdNotificationConfig | (() => CdNotificationConfig),
title?: string,
message?: string,
options?: any | IndividualConfig,
application?: string
): number {
return window.setTimeout(() => {
let config: CdNotificationConfig;
if (_.isFunction(arg)) {
config = arg() as CdNotificationConfig;
} else if (_.isObject(arg)) {
config = arg as CdNotificationConfig;
} else {
config = new CdNotificationConfig(
arg as NotificationType,
title,
message,
options,
application
);
}
this.queueToShow(config);
}, 10);
}
private queueToShow(config: CdNotificationConfig) {
this.cancel(this.queuedTimeoutId);
if (!this.queued.find((c) => _.isEqual(c, config))) {
this.queued.push(config);
}
this.queuedTimeoutId = window.setTimeout(() => {
this.showQueued();
}, 500);
}
private showQueued() {
this.getUnifiedTitleQueue().forEach((config) => {
const notification = new CdNotification(config);
if (!notification.isFinishedTask) {
this.save(notification);
}
this.showToasty(notification);
});
}
private getUnifiedTitleQueue(): CdNotificationConfig[] {
return Object.values(this.queueShiftByTitle()).map((configs) => {
const config = configs[0];
if (configs.length > 1) {
config.message = '<ul>' + configs.map((c) => `<li>${c.message}</li>`).join('') + '</ul>';
}
return config;
});
}
private queueShiftByTitle(): { [key: string]: CdNotificationConfig[] } {
const byTitle: { [key: string]: CdNotificationConfig[] } = {};
let config: CdNotificationConfig;
while ((config = this.queued.shift())) {
if (!byTitle[config.title]) {
byTitle[config.title] = [];
}
byTitle[config.title].push(config);
}
return byTitle;
}
private showToasty(notification: CdNotification) {
// Exit immediately if no toasty should be displayed.
if (this.hideToasties) {
return;
}
this.toastr[['error', 'info', 'success'][notification.type]](
(notification.message ? notification.message + '<br>' : '') +
this.renderTimeAndApplicationHtml(notification),
notification.title,
notification.options
);
}
renderTimeAndApplicationHtml(notification: CdNotification): string {
return `<small class="date">${this.cdDatePipe.transform(
notification.timestamp
)}</small><i class="float-end custom-icon ${notification.applicationClass}" title="${
notification.application
}"></i>`;
}
notifyTask(finishedTask: FinishedTask, success: boolean = true): number {
const notification = this.finishedTaskToNotification(finishedTask, success);
notification.isFinishedTask = true;
return this.show(notification);
}
finishedTaskToNotification(
finishedTask: FinishedTask,
success: boolean = true
): CdNotificationConfig {
let notification: CdNotificationConfig;
if (finishedTask.success && success) {
notification = new CdNotificationConfig(
NotificationType.success,
this.taskMessageService.getSuccessTitle(finishedTask)
);
} else {
notification = new CdNotificationConfig(
NotificationType.error,
this.taskMessageService.getErrorTitle(finishedTask),
this.taskMessageService.getErrorMessage(finishedTask)
);
}
notification.isFinishedTask = true;
return notification;
}
  /**
   * Prevent the notification from being shown.
   * Safe to call with an ID whose timeout already fired or was cleared.
   * @param {number} timeoutId A number representing the ID of the timeout to be canceled.
   */
  cancel(timeoutId: number) {
    window.clearTimeout(timeoutId);
  }
  /**
   * Suspend showing the notification toasties.
   * While suspended, notifications are still queued and saved; only the
   * toastr popups are skipped (see showToasty).
   * @param {boolean} suspend Set to ``true`` to disable/hide toasties.
   */
  suspendToasties(suspend: boolean) {
    this.hideToasties = suspend;
  }
  /**
   * Broadcast a sidebar toggle request on the sidebar subject; the
   * ``forceClose`` flag is forwarded to subscribers unchanged.
   */
  toggleSidebar(forceClose = false) {
    this.sidebarSubject.next(forceClose);
  }
}
| 7,101 | 28.840336 | 97 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/number-formatter.service.spec.ts
|
import { TestBed } from '@angular/core/testing';
import { NumberFormatterService } from './number-formatter.service';
// Smoke test for NumberFormatterService: verify the service can be created
// from the root injector without any explicit providers.
// Fix: the suite was labelled 'FormatToService', which does not match the
// service under test and made reporter output misleading.
describe('NumberFormatterService', () => {
  let service: NumberFormatterService;
  beforeEach(() => {
    TestBed.configureTestingModule({});
    service = TestBed.inject(NumberFormatterService);
  });
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
});
| 396 | 22.352941 | 68 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/number-formatter.service.ts
|
import { Injectable } from '@angular/core';
import { FormatterService } from './formatter.service';
/**
 * Thin convenience wrapper around FormatterService.formatNumberFromTo that
 * bundles the commonly used unit scales (bytes, bytes/s, seconds, unitless).
 */
@Injectable({
  providedIn: 'root'
})
export class NumberFormatterService {
  // Binary (factor 1024) byte scale.
  readonly bytesLabels = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'];
  // Binary (factor 1024) throughput scale.
  readonly bytesPerSecondLabels = [
    'B/s',
    'KiB/s',
    'MiB/s',
    'GiB/s',
    'TiB/s',
    'PiB/s',
    'EiB/s',
    'ZiB/s',
    'YiB/s'
  ];
  // Decimal (factor 1000) time scale.
  readonly secondsLabels = ['ns', 'μs', 'ms', 's', 'ks', 'Ms'];
  // Decimal (factor 1000) scale for plain numbers.
  readonly unitlessLabels = ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'];
  constructor(private formatter: FormatterService) {}
  /**
   * Convert `value` from `units` into `targetedUnits` along the given label
   * scale, stepping by `factor` between adjacent labels. Delegates to
   * FormatterService.formatNumberFromTo.
   */
  formatFromTo(
    value: any,
    units: string,
    targetedUnits: string,
    factor: number,
    labels: string[],
    decimals: number = 1
  ): any {
    return this.formatter.formatNumberFromTo(value, units, targetedUnits, factor, labels, decimals);
  }
  /** Byte conversion, e.g. MiB -> GiB. */
  formatBytesFromTo(value: any, units: string, targetedUnits: string, decimals: number = 1): any {
    return this.formatFromTo(value, units, targetedUnits, 1024, this.bytesLabels, decimals);
  }
  /** Throughput conversion, e.g. MiB/s -> GiB/s. */
  formatBytesPerSecondFromTo(
    value: any,
    units: string,
    targetedUnits: string,
    decimals: number = 1
  ): any {
    return this.formatFromTo(value, units, targetedUnits, 1024, this.bytesPerSecondLabels, decimals);
  }
  /** Time conversion, e.g. ms -> s. */
  formatSecondsFromTo(value: any, units: string, targetedUnits: string, decimals: number = 1): any {
    return this.formatFromTo(value, units, targetedUnits, 1000, this.secondsLabels, decimals);
  }
  /** Plain-number conversion, e.g. k -> M. */
  formatUnitlessFromTo(
    value: any,
    units: string,
    targetedUnits: string,
    decimals: number = 1
  ): any {
    return this.formatFromTo(value, units, targetedUnits, 1000, this.unitlessLabels, decimals);
  }
}
| 1,775 | 24.73913 | 100 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/password-policy.service.spec.ts
|
import { HttpClientTestingModule } from '@angular/common/http/testing';
import { TestBed } from '@angular/core/testing';
import { of as observableOf } from 'rxjs';
import { configureTestBed } from '~/testing/unit-test-helper';
import { SettingsService } from '../api/settings.service';
import { SharedModule } from '../shared.module';
import { PasswordPolicyService } from './password-policy.service';
// Unit tests for PasswordPolicyService. Each "chk_*" case enables exactly one
// policy rule via mocked settings and asserts that getHelpText() lists only
// that rule; the final group checks the credits -> CSS class bucket mapping.
describe('PasswordPolicyService', () => {
  let service: PasswordPolicyService;
  let settingsService: SettingsService;
  // Builds the expected help text for a single enabled rule.
  const helpTextHelper = {
    get: (chk: string) => {
      const chkTexts: { [key: string]: string } = {
        chk_length: 'Must contain at least 10 characters',
        chk_oldpwd: 'Must not be the same as the previous one',
        chk_username: 'Cannot contain the username',
        chk_exclusion_list: 'Cannot contain any configured keyword',
        chk_repetitive: 'Cannot contain any repetitive characters e.g. "aaa"',
        chk_sequential: 'Cannot contain any sequential characters e.g. "abc"',
        chk_complexity:
          'Must consist of characters from the following groups:\n' +
          '  * Alphabetic a-z, A-Z\n' +
          '  * Numbers 0-9\n' +
          '  * Special chars: !"#$%& \'()*+,-./:;<=>?@[\\]^_`{{|}}~\n' +
          '  * Any other characters (signs)'
      };
      return ['Required rules for passwords:', '- ' + chkTexts[chk]].join('\n');
    }
  };
  configureTestBed({
    imports: [HttpClientTestingModule, SharedModule]
  });
  beforeEach(() => {
    service = TestBed.inject(PasswordPolicyService);
    settingsService = TestBed.inject(SettingsService);
    // Reset the settings cache so each test's mocked response is used.
    settingsService['settings'] = {};
  });
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
  it('should not get help text', () => {
    let helpText = '';
    spyOn(settingsService, 'getStandardSettings').and.returnValue(
      observableOf({
        pwd_policy_enabled: false
      })
    );
    service.getHelpText().subscribe((text) => (helpText = text));
    expect(helpText).toBe('');
  });
  it('should get help text chk_length', () => {
    let helpText = '';
    const expectedHelpText = helpTextHelper.get('chk_length');
    spyOn(settingsService, 'getStandardSettings').and.returnValue(
      observableOf({
        user_pwd_expiration_warning_1: 10,
        user_pwd_expiration_warning_2: 5,
        user_pwd_expiration_span: 90,
        pwd_policy_enabled: true,
        pwd_policy_min_length: 10,
        pwd_policy_check_length_enabled: true,
        pwd_policy_check_oldpwd_enabled: false,
        pwd_policy_check_sequential_chars_enabled: false,
        pwd_policy_check_complexity_enabled: false
      })
    );
    service.getHelpText().subscribe((text) => (helpText = text));
    expect(helpText).toBe(expectedHelpText);
  });
  it('should get help text chk_oldpwd', () => {
    let helpText = '';
    const expectedHelpText = helpTextHelper.get('chk_oldpwd');
    spyOn(settingsService, 'getStandardSettings').and.returnValue(
      observableOf({
        pwd_policy_enabled: true,
        pwd_policy_check_oldpwd_enabled: true,
        pwd_policy_check_username_enabled: false,
        pwd_policy_check_exclusion_list_enabled: false,
        pwd_policy_check_complexity_enabled: false
      })
    );
    service.getHelpText().subscribe((text) => (helpText = text));
    expect(helpText).toBe(expectedHelpText);
  });
  it('should get help text chk_username', () => {
    let helpText = '';
    const expectedHelpText = helpTextHelper.get('chk_username');
    spyOn(settingsService, 'getStandardSettings').and.returnValue(
      observableOf({
        pwd_policy_enabled: true,
        pwd_policy_check_oldpwd_enabled: false,
        pwd_policy_check_username_enabled: true,
        pwd_policy_check_exclusion_list_enabled: false
      })
    );
    service.getHelpText().subscribe((text) => (helpText = text));
    expect(helpText).toBe(expectedHelpText);
  });
  it('should get help text chk_exclusion_list', () => {
    let helpText = '';
    const expectedHelpText = helpTextHelper.get('chk_exclusion_list');
    spyOn(settingsService, 'getStandardSettings').and.returnValue(
      observableOf({
        pwd_policy_enabled: true,
        pwd_policy_check_username_enabled: false,
        pwd_policy_check_exclusion_list_enabled: true,
        pwd_policy_check_repetitive_chars_enabled: false
      })
    );
    service.getHelpText().subscribe((text) => (helpText = text));
    expect(helpText).toBe(expectedHelpText);
  });
  it('should get help text chk_repetitive', () => {
    let helpText = '';
    const expectedHelpText = helpTextHelper.get('chk_repetitive');
    spyOn(settingsService, 'getStandardSettings').and.returnValue(
      observableOf({
        user_pwd_expiration_warning_1: 10,
        pwd_policy_enabled: true,
        pwd_policy_check_oldpwd_enabled: false,
        pwd_policy_check_exclusion_list_enabled: false,
        pwd_policy_check_repetitive_chars_enabled: true,
        pwd_policy_check_sequential_chars_enabled: false,
        pwd_policy_check_complexity_enabled: false
      })
    );
    service.getHelpText().subscribe((text) => (helpText = text));
    expect(helpText).toBe(expectedHelpText);
  });
  it('should get help text chk_sequential', () => {
    let helpText = '';
    const expectedHelpText = helpTextHelper.get('chk_sequential');
    spyOn(settingsService, 'getStandardSettings').and.returnValue(
      observableOf({
        pwd_policy_enabled: true,
        pwd_policy_min_length: 8,
        pwd_policy_check_length_enabled: false,
        pwd_policy_check_oldpwd_enabled: false,
        pwd_policy_check_username_enabled: false,
        pwd_policy_check_exclusion_list_enabled: false,
        pwd_policy_check_repetitive_chars_enabled: false,
        pwd_policy_check_sequential_chars_enabled: true,
        pwd_policy_check_complexity_enabled: false
      })
    );
    service.getHelpText().subscribe((text) => (helpText = text));
    expect(helpText).toBe(expectedHelpText);
  });
  it('should get help text chk_complexity', () => {
    let helpText = '';
    const expectedHelpText = helpTextHelper.get('chk_complexity');
    spyOn(settingsService, 'getStandardSettings').and.returnValue(
      observableOf({
        pwd_policy_enabled: true,
        pwd_policy_min_length: 8,
        pwd_policy_check_length_enabled: false,
        pwd_policy_check_oldpwd_enabled: false,
        pwd_policy_check_username_enabled: false,
        pwd_policy_check_exclusion_list_enabled: false,
        pwd_policy_check_repetitive_chars_enabled: false,
        pwd_policy_check_sequential_chars_enabled: false,
        pwd_policy_check_complexity_enabled: true
      })
    );
    service.getHelpText().subscribe((text) => (helpText = text));
    expect(helpText).toBe(expectedHelpText);
  });
  // Bucket boundaries below are tested at both ends of each range.
  it('should get too-weak class', () => {
    expect(service.mapCreditsToCssClass(0)).toBe('too-weak');
    expect(service.mapCreditsToCssClass(9)).toBe('too-weak');
  });
  it('should get weak class', () => {
    expect(service.mapCreditsToCssClass(10)).toBe('weak');
    expect(service.mapCreditsToCssClass(14)).toBe('weak');
  });
  it('should get ok class', () => {
    expect(service.mapCreditsToCssClass(15)).toBe('ok');
    expect(service.mapCreditsToCssClass(19)).toBe('ok');
  });
  it('should get strong class', () => {
    expect(service.mapCreditsToCssClass(20)).toBe('strong');
    expect(service.mapCreditsToCssClass(24)).toBe('strong');
  });
  it('should get very-strong class', () => {
    expect(service.mapCreditsToCssClass(25)).toBe('very-strong');
    expect(service.mapCreditsToCssClass(30)).toBe('very-strong');
  });
});
| 7,678 | 35.741627 | 80 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/password-policy.service.ts
|
import { Injectable } from '@angular/core';
import _ from 'lodash';
import { Observable } from 'rxjs';
import { map } from 'rxjs/operators';
import { SettingsService } from '../api/settings.service';
import { CdPwdPolicySettings } from '../models/cd-pwd-policy-settings';
@Injectable({
  providedIn: 'root'
})
export class PasswordPolicyService {
  constructor(private settingsService: SettingsService) {}
  /**
   * Assemble the multi-line help text listing every enabled password rule.
   * Resolves to an empty string while the password policy is disabled.
   */
  getHelpText(): Observable<string> {
    return this.settingsService.getStandardSettings().pipe(
      map((resp: { [key: string]: any }) => {
        const settings = new CdPwdPolicySettings(resp);
        if (!settings.pwdPolicyEnabled) {
          return '';
        }
        // Maps each CdPwdPolicySettings flag to the description of its rule.
        const i18nHelp: { [key: string]: string } = {
          pwdPolicyCheckLengthEnabled: $localize`Must contain at least ${settings.pwdPolicyMinLength} characters`,
          pwdPolicyCheckOldpwdEnabled: $localize`Must not be the same as the previous one`,
          pwdPolicyCheckUsernameEnabled: $localize`Cannot contain the username`,
          pwdPolicyCheckExclusionListEnabled: $localize`Cannot contain any configured keyword`,
          pwdPolicyCheckRepetitiveCharsEnabled: $localize`Cannot contain any repetitive characters e.g. "aaa"`,
          pwdPolicyCheckSequentialCharsEnabled: $localize`Cannot contain any sequential characters e.g. "abc"`,
          pwdPolicyCheckComplexityEnabled: $localize`Must consist of characters from the following groups:
  * Alphabetic a-z, A-Z
  * Numbers 0-9
  * Special chars: !"#$%& '()*+,-./:;<=>?@[\\]^_\`{{|}}~
  * Any other characters (signs)`
        };
        const lines = [$localize`Required rules for passwords:`];
        _.keys(i18nHelp).forEach((key) => {
          if (_.get(settings, key)) {
            lines.push('- ' + _.get(i18nHelp, key));
          }
        });
        return lines.join('\n');
      })
    );
  }
  /**
   * Helper function to map password policy credits to a CSS class.
   * @param credits The password policy credits.
   * @return The name of the CSS class.
   */
  mapCreditsToCssClass(credits: number): string {
    // Exclusive upper bounds and the class used below each of them.
    const buckets: [number, string][] = [
      [10, 'too-weak'],
      [15, 'weak'],
      [20, 'ok'],
      [25, 'strong']
    ];
    for (const [upperBound, cssClass] of buckets) {
      if (credits < upperBound) {
        return cssClass;
      }
    }
    return 'very-strong';
  }
}
| 2,436 | 35.924242 | 116 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-alert-formatter.spec.ts
|
import { HttpClientTestingModule } from '@angular/common/http/testing';
import { TestBed } from '@angular/core/testing';
import { ToastrModule } from 'ngx-toastr';
import { configureTestBed, PrometheusHelper } from '~/testing/unit-test-helper';
import { NotificationType } from '../enum/notification-type.enum';
import { CdNotificationConfig } from '../models/cd-notification';
import { PrometheusCustomAlert } from '../models/prometheus-alerts';
import { SharedModule } from '../shared.module';
import { NotificationService } from './notification.service';
import { PrometheusAlertFormatter } from './prometheus-alert-formatter';
// Unit tests for PrometheusAlertFormatter: notification dispatch, conversion
// of Alertmanager (and notification) alerts into custom alerts, and the
// custom-alert -> CdNotificationConfig translation.
describe('PrometheusAlertFormatter', () => {
  let service: PrometheusAlertFormatter;
  let notificationService: NotificationService;
  let prometheus: PrometheusHelper;
  configureTestBed({
    imports: [ToastrModule.forRoot(), SharedModule, HttpClientTestingModule],
    providers: [PrometheusAlertFormatter]
  });
  beforeEach(() => {
    prometheus = new PrometheusHelper();
    service = TestBed.inject(PrometheusAlertFormatter);
    notificationService = TestBed.inject(NotificationService);
    // Stub show() so no real toasties are produced while testing.
    spyOn(notificationService, 'show').and.stub();
  });
  it('should create', () => {
    expect(service).toBeTruthy();
  });
  describe('sendNotifications', () => {
    it('should not call queue notifications with no notification', () => {
      service.sendNotifications([]);
      expect(notificationService.show).not.toHaveBeenCalled();
    });
    it('should call queue notifications with notifications', () => {
      const notifications = [new CdNotificationConfig(NotificationType.success, 'test')];
      service.sendNotifications(notifications);
      expect(notificationService.show).toHaveBeenCalledWith(notifications[0]);
    });
  });
  describe('convertToCustomAlert', () => {
    it('converts PrometheusAlert', () => {
      expect(service.convertToCustomAlerts([prometheus.createAlert('Something')])).toEqual([
        {
          status: 'active',
          name: 'Something',
          description: 'Something is active',
          url: 'http://Something',
          fingerprint: 'Something'
        } as PrometheusCustomAlert
      ]);
    });
    // Notification alerts have no fingerprint, hence `fingerprint: false`.
    it('converts PrometheusNotificationAlert', () => {
      expect(
        service.convertToCustomAlerts([prometheus.createNotificationAlert('Something')])
      ).toEqual([
        {
          fingerprint: false,
          status: 'active',
          name: 'Something',
          description: 'Something is firing',
          url: 'http://Something'
        } as PrometheusCustomAlert
      ]);
    });
  });
  it('converts custom alert into notification', () => {
    const alert: PrometheusCustomAlert = {
      status: 'active',
      name: 'Some alert',
      description: 'Some alert is active',
      url: 'http://some-alert',
      fingerprint: '42'
    };
    expect(service.convertAlertToNotification(alert)).toEqual(
      new CdNotificationConfig(
        NotificationType.error,
        'Some alert (active)',
        'Some alert is active <a href="http://some-alert" target="_blank">' +
          '<i class="fa fa-line-chart"></i></a>',
        undefined,
        'Prometheus'
      )
    );
  });
});
| 3,223 | 32.583333 | 92 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-alert-formatter.ts
|
import { Injectable } from '@angular/core';
import _ from 'lodash';
import { Icons } from '../enum/icons.enum';
import { NotificationType } from '../enum/notification-type.enum';
import { CdNotificationConfig } from '../models/cd-notification';
import {
AlertmanagerAlert,
AlertmanagerNotificationAlert,
PrometheusCustomAlert
} from '../models/prometheus-alerts';
import { NotificationService } from './notification.service';
@Injectable({
  providedIn: 'root'
})
export class PrometheusAlertFormatter {
  constructor(private notificationService: NotificationService) {}
  /** Forward every given notification config to the notification service. */
  sendNotifications(notifications: CdNotificationConfig[]) {
    for (const notification of notifications) {
      this.notificationService.show(notification);
    }
  }
  /**
   * Normalize Alertmanager alerts and Alertmanager notification alerts into
   * the common PrometheusCustomAlert shape, dropping exact duplicates.
   * Only full Alertmanager alerts carry an object `status` (and therefore a
   * fingerprint); notification alerts have a plain string status.
   */
  convertToCustomAlerts(
    alerts: (AlertmanagerNotificationAlert | AlertmanagerAlert)[]
  ): PrometheusCustomAlert[] {
    const customAlerts = alerts.map((alert) => {
      const isAlertmanagerAlert = _.isObject(alert.status);
      return {
        status: isAlertmanagerAlert
          ? (alert as AlertmanagerAlert).status.state
          : this.getPrometheusNotificationStatus(alert as AlertmanagerNotificationAlert),
        name: alert.labels.alertname,
        url: alert.generatorURL,
        description: alert.annotations.description,
        fingerprint: isAlertmanagerAlert && (alert as AlertmanagerAlert).fingerprint
      };
    });
    return _.uniqWith(customAlerts, _.isEqual) as PrometheusCustomAlert[];
  }
  /*
   * This is needed because NotificationAlerts don't use 'active'
   */
  private getPrometheusNotificationStatus(alert: AlertmanagerNotificationAlert): string {
    return alert.status === 'firing' ? 'active' : alert.status;
  }
  /** Turn a custom alert into a notification config with a source link. */
  convertAlertToNotification(alert: PrometheusCustomAlert): CdNotificationConfig {
    return new CdNotificationConfig(
      this.formatType(alert.status),
      `${alert.name} (${alert.status})`,
      this.appendSourceLink(alert, alert.description),
      undefined,
      'Prometheus'
    );
  }
  /** Map an alert status onto the notification type used for display. */
  private formatType(status: string): NotificationType {
    const types = {
      error: ['firing', 'active'],
      info: ['suppressed', 'unprocessed'],
      success: ['resolved']
    };
    const typeName = _.findKey(types, (statuses) => statuses.includes(status));
    return NotificationType[typeName];
  }
  /** Append an anchor (with chart icon) pointing at the alert's source. */
  private appendSourceLink(alert: PrometheusCustomAlert, message: string): string {
    const icon = `<i class="${Icons.lineChart}"></i>`;
    return `${message} <a href="${alert.url}" target="_blank">${icon}</a>`;
  }
}
| 2,418 | 31.253333 | 102 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-alert.service.spec.ts
|
import { HttpClientTestingModule } from '@angular/common/http/testing';
import { TestBed } from '@angular/core/testing';
import { ToastrModule } from 'ngx-toastr';
import { Observable, of } from 'rxjs';
import { configureTestBed, PrometheusHelper } from '~/testing/unit-test-helper';
import { PrometheusService } from '../api/prometheus.service';
import { NotificationType } from '../enum/notification-type.enum';
import { CdNotificationConfig } from '../models/cd-notification';
import { AlertmanagerAlert } from '../models/prometheus-alerts';
import { SharedModule } from '../shared.module';
import { NotificationService } from './notification.service';
import { PrometheusAlertFormatter } from './prometheus-alert-formatter';
import { PrometheusAlertService } from './prometheus-alert.service';
// Unit tests for PrometheusAlertService: error-based Alertmanager disabling,
// rule-group flattening, change notifications across refreshes and the
// active-alert badge counter.
describe('PrometheusAlertService', () => {
  let service: PrometheusAlertService;
  let notificationService: NotificationService;
  let alerts: AlertmanagerAlert[];
  let prometheusService: PrometheusService;
  let prometheus: PrometheusHelper;
  configureTestBed({
    imports: [ToastrModule.forRoot(), SharedModule, HttpClientTestingModule],
    providers: [PrometheusAlertService, PrometheusAlertFormatter]
  });
  beforeEach(() => {
    prometheus = new PrometheusHelper();
  });
  it('should create', () => {
    expect(TestBed.inject(PrometheusAlertService)).toBeTruthy();
  });
  describe('test failing status codes and verify disabling of the alertmanager', () => {
    // Drives getAlerts() into an error response with the given status code
    // and asserts whether disableAlertmanagerConfig() gets invoked.
    const isDisabledByStatusCode = (statusCode: number, expectedStatus: boolean, done: any) => {
      service = TestBed.inject(PrometheusAlertService)
      prometheusService = TestBed.inject(PrometheusService);
      spyOn(prometheusService, 'ifAlertmanagerConfigured').and.callFake((fn) => fn());
      spyOn(prometheusService, 'getAlerts').and.returnValue(
        new Observable((observer: any) => observer.error({ status: statusCode, error: {} }))
      );
      const disableFn = spyOn(prometheusService, 'disableAlertmanagerConfig').and.callFake(() => {
        expect(expectedStatus).toBe(true);
        done();
      });
      if (!expectedStatus) {
        expect(disableFn).not.toHaveBeenCalled();
        done();
      }
      service.getAlerts();
    };
    it('disables on 504 error which is thrown if the mgr failed', (done) => {
      isDisabledByStatusCode(504, true, done);
    });
    it('disables on 404 error which is thrown if the external api cannot be reached', (done) => {
      isDisabledByStatusCode(404, true, done);
    });
    it('does not disable on 400 error which is thrown if the external api receives unexpected data', (done) => {
      isDisabledByStatusCode(400, false, done);
    });
  });
  it('should flatten the response of getRules()', () => {
    service = TestBed.inject(PrometheusAlertService);
    prometheusService = TestBed.inject(PrometheusService);
    spyOn(service['prometheusService'], 'ifPrometheusConfigured').and.callFake((fn) => fn());
    spyOn(prometheusService, 'getRules').and.returnValue(
      of({
        groups: [
          {
            name: 'group1',
            rules: [{ name: 'nearly_full', type: 'alerting' }]
          },
          {
            name: 'test',
            rules: [
              { name: 'load_0', type: 'alerting' },
              { name: 'load_1', type: 'alerting' },
              { name: 'load_2', type: 'alerting' }
            ]
          }
        ]
      })
    );
    service.getRules();
    expect(service.rules as any).toEqual([
      { name: 'nearly_full', type: 'alerting', group: 'group1' },
      { name: 'load_0', type: 'alerting', group: 'test' },
      { name: 'load_1', type: 'alerting', group: 'test' },
      { name: 'load_2', type: 'alerting', group: 'test' }
    ]);
  });
  describe('refresh', () => {
    beforeEach(() => {
      service = TestBed.inject(PrometheusAlertService);
      service['alerts'] = [];
      service['canAlertsBeNotified'] = false;
      spyOn(window, 'setTimeout').and.callFake((fn: Function) => fn());
      notificationService = TestBed.inject(NotificationService);
      spyOn(notificationService, 'show').and.stub();
      prometheusService = TestBed.inject(PrometheusService);
      spyOn(prometheusService, 'ifAlertmanagerConfigured').and.callFake((fn) => fn());
      // `alerts` is reassigned per test; the spy always returns the latest.
      spyOn(prometheusService, 'getAlerts').and.callFake(() => of(alerts));
      alerts = [prometheus.createAlert('alert0')];
      service.refresh();
    });
    it('should not notify on first call', () => {
      expect(notificationService.show).not.toHaveBeenCalled();
    });
    it('should not notify with no change', () => {
      service.refresh();
      expect(notificationService.show).not.toHaveBeenCalled();
    });
    it('should notify on alert change', () => {
      alerts = [prometheus.createAlert('alert0', 'resolved')];
      service.refresh();
      expect(notificationService.show).toHaveBeenCalledWith(
        new CdNotificationConfig(
          NotificationType.success,
          'alert0 (resolved)',
          'alert0 is resolved ' + prometheus.createLink('http://alert0'),
          undefined,
          'Prometheus'
        )
      );
    });
    it('should not notify on change to suppressed', () => {
      alerts = [prometheus.createAlert('alert0', 'suppressed')];
      service.refresh();
      expect(notificationService.show).not.toHaveBeenCalled();
    });
    it('should notify on a new alert', () => {
      alerts = [prometheus.createAlert('alert1'), prometheus.createAlert('alert0')];
      service.refresh();
      expect(notificationService.show).toHaveBeenCalledTimes(1);
      expect(notificationService.show).toHaveBeenCalledWith(
        new CdNotificationConfig(
          NotificationType.error,
          'alert1 (active)',
          'alert1 is active ' + prometheus.createLink('http://alert1'),
          undefined,
          'Prometheus'
        )
      );
    });
    it('should notify a resolved alert if it is not there anymore', () => {
      alerts = [];
      service.refresh();
      expect(notificationService.show).toHaveBeenCalledTimes(1);
      expect(notificationService.show).toHaveBeenCalledWith(
        new CdNotificationConfig(
          NotificationType.success,
          'alert0 (resolved)',
          'alert0 is active ' + prometheus.createLink('http://alert0'),
          undefined,
          'Prometheus'
        )
      );
    });
    it('should call multiple times for multiple changes', () => {
      const alert1 = prometheus.createAlert('alert1');
      alerts.push(alert1);
      service.refresh();
      alerts = [alert1, prometheus.createAlert('alert2')];
      service.refresh();
      expect(notificationService.show).toHaveBeenCalledTimes(2);
    });
  });
  describe('alert badge', () => {
    beforeEach(() => {
      service = TestBed.inject(PrometheusAlertService);
      prometheusService = TestBed.inject(PrometheusService);
      spyOn(prometheusService, 'ifAlertmanagerConfigured').and.callFake((fn) => fn());
      spyOn(prometheusService, 'getAlerts').and.callFake(() => of(alerts));
      alerts = [
        prometheus.createAlert('alert0', 'active'),
        prometheus.createAlert('alert1', 'suppressed'),
        prometheus.createAlert('alert2', 'suppressed')
      ];
      service.refresh();
    });
    it('should count active alerts', () => {
      service.refresh();
      expect(service.activeAlerts).toBe(1);
    });
  });
});
| 7,446 | 33.637209 | 112 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-alert.service.ts
|
import { Injectable } from '@angular/core';
import _ from 'lodash';
import { PrometheusService } from '../api/prometheus.service';
import {
AlertmanagerAlert,
PrometheusCustomAlert,
PrometheusRule
} from '../models/prometheus-alerts';
import { PrometheusAlertFormatter } from './prometheus-alert-formatter';
@Injectable({
  providedIn: 'root'
})
export class PrometheusAlertService {
  // Suppresses change notifications until the first batch has been handled.
  private canAlertsBeNotified = false;
  alerts: AlertmanagerAlert[] = [];
  rules: PrometheusRule[] = [];
  // Badge counters derived from the alert batch most recently received.
  activeAlerts: number;
  activeCriticalAlerts: number;
  activeWarningAlerts: number;
  constructor(
    private alertFormatter: PrometheusAlertFormatter,
    private prometheusService: PrometheusService
  ) {}
  /**
   * Fetch the current alerts from the Alertmanager (if one is configured).
   * A 404 (external API unreachable) or 504 (mgr failure) response disables
   * the Alertmanager configuration; other errors are ignored here.
   */
  getAlerts() {
    this.prometheusService.ifAlertmanagerConfigured(() => {
      this.prometheusService.getAlerts().subscribe(
        (alerts) => this.handleAlerts(alerts),
        (resp) => {
          if ([404, 504].includes(resp.status)) {
            this.prometheusService.disableAlertmanagerConfig();
          }
        }
      );
    });
  }
  /**
   * Fetch the alerting rules from Prometheus (if configured) and flatten
   * the group hierarchy into `this.rules`, tagging every rule with the
   * name of the group it belongs to.
   */
  getRules() {
    this.prometheusService.ifPrometheusConfigured(() => {
      this.prometheusService.getRules('alerting').subscribe((groups) => {
        this.rules = groups['groups'].reduce((acc, group) => {
          return acc.concat(
            group.rules.map((rule) => {
              rule.group = group.name;
              return rule;
            })
          );
        }, []);
      });
    });
  }
  /** Refresh both the alert list and the rule list. */
  refresh() {
    this.getAlerts();
    this.getRules();
  }
  private handleAlerts(alerts: AlertmanagerAlert[]) {
    if (this.canAlertsBeNotified) {
      this.notifyOnAlertChanges(alerts, this.alerts);
    }
    // Store the fresh batch *before* deriving the badge counters. The
    // previous implementation reduced over the stale `this.alerts`, so all
    // three counters lagged one refresh cycle behind the received data.
    this.alerts = alerts;
    this.canAlertsBeNotified = true;
    this.activeAlerts = this.countActive();
    this.activeCriticalAlerts = this.countActive('critical');
    this.activeWarningAlerts = this.countActive('warning');
  }
  /** Count active alerts, optionally restricted to one severity label. */
  private countActive(severity?: string): number {
    return this.alerts.filter(
      (alert) =>
        alert.status.state === 'active' &&
        (severity === undefined || alert.labels.severity === severity)
    ).length;
  }
  /**
   * Send one notification per alert whose state changed between the old
   * and the new batch; alerts that became 'suppressed' are skipped.
   */
  private notifyOnAlertChanges(alerts: AlertmanagerAlert[], oldAlerts: AlertmanagerAlert[]) {
    const changedAlerts = this.getChangedAlerts(
      this.alertFormatter.convertToCustomAlerts(alerts),
      this.alertFormatter.convertToCustomAlerts(oldAlerts)
    );
    const suppressedFiltered = _.filter(changedAlerts, (alert) => {
      return alert.status !== 'suppressed';
    });
    const notifications = suppressedFiltered.map((alert) =>
      this.alertFormatter.convertAlertToNotification(alert)
    );
    this.alertFormatter.sendNotifications(notifications);
  }
  /** New or updated alerts plus the vanished ones (marked resolved). */
  private getChangedAlerts(alerts: PrometheusCustomAlert[], oldAlerts: PrometheusCustomAlert[]) {
    const updatedAndNew = _.differenceWith(alerts, oldAlerts, _.isEqual);
    return updatedAndNew.concat(this.getVanishedAlerts(alerts, oldAlerts));
  }
  /**
   * Alerts present in the old batch but missing (by fingerprint) from the
   * new one; these are reported with status 'resolved'.
   */
  private getVanishedAlerts(alerts: PrometheusCustomAlert[], oldAlerts: PrometheusCustomAlert[]) {
    return _.differenceWith(oldAlerts, alerts, (a, b) => a.fingerprint === b.fingerprint).map(
      (alert) => {
        alert.status = 'resolved';
        return alert;
      }
    );
  }
}
| 3,510 | 29.530435 | 100 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-notification.service.spec.ts
|
import { HttpClientTestingModule } from '@angular/common/http/testing';
import { fakeAsync, TestBed, tick } from '@angular/core/testing';
import { ToastrModule, ToastrService } from 'ngx-toastr';
import { of, throwError } from 'rxjs';
import { configureTestBed, PrometheusHelper } from '~/testing/unit-test-helper';
import { PrometheusService } from '../api/prometheus.service';
import { NotificationType } from '../enum/notification-type.enum';
import { CdNotificationConfig } from '../models/cd-notification';
import { AlertmanagerNotification } from '../models/prometheus-alerts';
import { SharedModule } from '../shared.module';
import { NotificationService } from './notification.service';
import { PrometheusAlertFormatter } from './prometheus-alert-formatter';
import { PrometheusNotificationService } from './prometheus-notification.service';
describe('PrometheusNotificationService', () => {
let service: PrometheusNotificationService;
let notificationService: NotificationService;
let notifications: AlertmanagerNotification[];
let prometheusService: PrometheusService;
let prometheus: PrometheusHelper;
let shown: CdNotificationConfig[];
let getNotificationSinceMock: Function;
const toastFakeService = {
error: () => true,
info: () => true,
success: () => true
};
configureTestBed({
imports: [ToastrModule.forRoot(), SharedModule, HttpClientTestingModule],
providers: [
PrometheusNotificationService,
PrometheusAlertFormatter,
{ provide: ToastrService, useValue: toastFakeService }
]
});
beforeEach(() => {
prometheus = new PrometheusHelper();
service = TestBed.inject(PrometheusNotificationService);
service['notifications'] = [];
notificationService = TestBed.inject(NotificationService);
shown = [];
spyOn(notificationService, 'show').and.callThrough();
spyOn(notificationService, 'save').and.callFake((n) => shown.push(n));
spyOn(window, 'setTimeout').and.callFake((fn: Function) => fn());
prometheusService = TestBed.inject(PrometheusService);
getNotificationSinceMock = () => of(notifications);
spyOn(prometheusService, 'getNotifications').and.callFake(() => getNotificationSinceMock());
notifications = [prometheus.createNotification()];
});
// Smoke test: the service built in the suite's beforeEach() exists.
it('should create', () => {
  expect(service).toBeTruthy();
});
describe('getLastNotification', () => {
  it('returns an empty object on the first call', () => {
    service.refresh();
    // On the very first poll there is no "last seen" notification yet.
    expect(prometheusService.getNotifications).toHaveBeenCalledWith(undefined);
    expect(service['notifications'].length).toBe(1);
  });
  it('returns last notification on any other call', () => {
    service.refresh();
    notifications = [prometheus.createNotification(1, 'resolved')];
    service.refresh();
    // Subsequent polls ask only for notifications newer than the last one seen.
    expect(prometheusService.getNotifications).toHaveBeenCalledWith(service['notifications'][0]);
    expect(service['notifications'].length).toBe(2);
    notifications = [prometheus.createNotification(2)];
    service.refresh();
    notifications = [prometheus.createNotification(3, 'resolved')];
    service.refresh();
    expect(prometheusService.getNotifications).toHaveBeenCalledWith(service['notifications'][2]);
    expect(service['notifications'].length).toBe(4);
  });
});
it('notifies not on the first call', () => {
  service.refresh();
  expect(notificationService.save).not.toHaveBeenCalled();
});
it('notifies should not call the api again if it failed once', () => {
  getNotificationSinceMock = () => throwError(new Error('Test error'));
  service.refresh();
  expect(prometheusService.getNotifications).toHaveBeenCalledTimes(1);
  expect(service['backendFailure']).toBe(true);
  service.refresh();
  expect(prometheusService.getNotifications).toHaveBeenCalledTimes(1);
  // Reset the latched failure flag so later tests are unaffected.
  service['backendFailure'] = false;
});
describe('looks of fired notifications', () => {
  // Triggers a refresh and lets the async notification pipeline settle.
  const asyncRefresh = () => {
    service.refresh();
    tick(20);
  };
  // Compares the saved notifications against the expected configs key by
  // key, so only the listed properties are asserted.
  const expectShown = (expected: object[]) => {
    tick(500);
    expect(shown.length).toBe(expected.length);
    expected.forEach((e, i) =>
      Object.keys(e).forEach((key) => expect(shown[i][key]).toEqual(expected[i][key]))
    );
  };
  beforeEach(() => {
    // First refresh establishes the baseline; no toasties are shown for it.
    service.refresh();
  });
  it('notifies on the second call', () => {
    service.refresh();
    expect(notificationService.show).toHaveBeenCalledTimes(1);
  });
  it('notify looks on single notification with single alert like', fakeAsync(() => {
    asyncRefresh();
    expectShown([
      new CdNotificationConfig(
        NotificationType.error,
        'alert0 (active)',
        'alert0 is firing ' + prometheus.createLink('http://alert0'),
        undefined,
        'Prometheus'
      )
    ]);
  }));
  it('raises multiple pop overs for a single notification with multiple alerts', fakeAsync(() => {
    asyncRefresh();
    notifications[0].alerts.push(prometheus.createNotificationAlert('alert1', 'resolved'));
    asyncRefresh();
    expectShown([
      new CdNotificationConfig(
        NotificationType.error,
        'alert0 (active)',
        'alert0 is firing ' + prometheus.createLink('http://alert0'),
        undefined,
        'Prometheus'
      ),
      new CdNotificationConfig(
        NotificationType.success,
        'alert1 (resolved)',
        'alert1 is resolved ' + prometheus.createLink('http://alert1'),
        undefined,
        'Prometheus'
      )
    ]);
  }));
  it('should raise multiple notifications if they do not look like each other', fakeAsync(() => {
    notifications[0].alerts.push(prometheus.createNotificationAlert('alert1'));
    notifications.push(prometheus.createNotification());
    notifications[1].alerts.push(prometheus.createNotificationAlert('alert2'));
    asyncRefresh();
    expectShown([
      new CdNotificationConfig(
        NotificationType.error,
        'alert0 (active)',
        'alert0 is firing ' + prometheus.createLink('http://alert0'),
        undefined,
        'Prometheus'
      ),
      new CdNotificationConfig(
        NotificationType.error,
        'alert1 (active)',
        'alert1 is firing ' + prometheus.createLink('http://alert1'),
        undefined,
        'Prometheus'
      ),
      new CdNotificationConfig(
        NotificationType.error,
        'alert2 (active)',
        'alert2 is firing ' + prometheus.createLink('http://alert2'),
        undefined,
        'Prometheus'
      )
    ]);
  }));
  it('only shows toasties if it got new data', () => {
    service.refresh();
    expect(notificationService.save).toHaveBeenCalledTimes(1);
    notifications = [];
    service.refresh();
    service.refresh();
    // Empty responses must not trigger additional toasties.
    expect(notificationService.save).toHaveBeenCalledTimes(1);
    notifications = [prometheus.createNotification()];
    service.refresh();
    expect(notificationService.save).toHaveBeenCalledTimes(2);
    service.refresh();
    expect(notificationService.save).toHaveBeenCalledTimes(3);
  });
  it('filters out duplicated and non user visible changes in notifications', fakeAsync(() => {
    asyncRefresh();
    // Return 2 notifications with 3 duplicated alerts and 1 non visible changed alert
    const secondAlert = prometheus.createNotificationAlert('alert0');
    secondAlert.endsAt = new Date().toString(); // Should be ignored as it's not visible
    notifications[0].alerts.push(secondAlert);
    notifications.push(prometheus.createNotification());
    notifications[1].alerts.push(prometheus.createNotificationAlert('alert0'));
    notifications[1].notified = 'by somebody else';
    asyncRefresh();
    expectShown([
      new CdNotificationConfig(
        NotificationType.error,
        'alert0 (active)',
        'alert0 is firing ' + prometheus.createLink('http://alert0'),
        undefined,
        'Prometheus'
      )
    ]);
  }));
});
});
| 8,087 | 34.473684 | 100 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-notification.service.ts
|
import { Injectable } from '@angular/core';
import _ from 'lodash';
import { PrometheusService } from '../api/prometheus.service';
import { CdNotificationConfig } from '../models/cd-notification';
import { AlertmanagerNotification } from '../models/prometheus-alerts';
import { PrometheusAlertFormatter } from './prometheus-alert-formatter';
@Injectable({
  providedIn: 'root'
})
export class PrometheusNotificationService {
  // All Alertmanager notifications received so far, oldest first.
  private notifications: AlertmanagerNotification[];
  // Latched on the first failed backend call; suppresses further polling.
  private backendFailure = false;

  constructor(
    private alertFormatter: PrometheusAlertFormatter,
    private prometheusService: PrometheusService
  ) {
    this.notifications = [];
  }

  /**
   * Poll the backend for notifications that arrived after the last one seen.
   * Does nothing once a backend failure has been observed.
   */
  refresh() {
    if (this.backendFailure) {
      return;
    }
    const lastSeen = _.last(this.notifications);
    this.prometheusService.getNotifications(lastSeen).subscribe(
      (incoming) => this.handleNotifications(incoming),
      () => (this.backendFailure = true)
    );
  }

  private handleNotifications(incoming: AlertmanagerNotification[]) {
    if (incoming.length === 0) {
      return;
    }
    // Only show toasties after the very first fetch, i.e. once a baseline
    // of already-known notifications exists.
    if (this.notifications.length > 0) {
      const configs = incoming.map((notification) => this.formatNotification(notification));
      this.alertFormatter.sendNotifications(_.flatten(configs));
    }
    this.notifications = this.notifications.concat(incoming);
  }

  // Converts one Alertmanager notification into dashboard notification configs,
  // one per contained alert.
  private formatNotification(notification: AlertmanagerNotification): CdNotificationConfig[] {
    const customAlerts = this.alertFormatter.convertToCustomAlerts(notification.alerts);
    return customAlerts.map((alert) => this.alertFormatter.convertAlertToNotification(alert));
  }
}
| 1,601 | 29.807692 | 94 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-silence-matcher.service.spec.ts
|
import { TestBed } from '@angular/core/testing';
import { configureTestBed, PrometheusHelper } from '~/testing/unit-test-helper';
import { PrometheusRule } from '../models/prometheus-alerts';
import { SharedModule } from '../shared.module';
import { PrometheusSilenceMatcherService } from './prometheus-silence-matcher.service';
describe('PrometheusSilenceMatcherService', () => {
let service: PrometheusSilenceMatcherService;
let prometheus: PrometheusHelper;
let rules: PrometheusRule[];
configureTestBed({
imports: [SharedModule]
});
const addMatcher = (name: string, value: any) => ({
name: name,
value: value,
isRegex: false
});
beforeEach(() => {
prometheus = new PrometheusHelper();
service = TestBed.inject(PrometheusSilenceMatcherService);
rules = [
prometheus.createRule('alert0', 'someSeverity', [prometheus.createAlert('alert0')]),
prometheus.createRule('alert1', 'someSeverity', []),
prometheus.createRule('alert2', 'someOtherSeverity', [prometheus.createAlert('alert2')])
];
});
it('should create', () => {
expect(service).toBeTruthy();
});
describe('test rule matching with one matcher', () => {
const expectSingleMatch = (
name: string,
value: any,
helpText: string,
successClass: boolean
) => {
const match = service.singleMatch(addMatcher(name, value), rules);
expect(match.status).toBe(helpText);
expect(match.cssClass).toBe(successClass ? 'has-success' : 'has-warning');
};
it('should match no rule and no alert', () => {
expectSingleMatch(
'alertname',
'alert',
'Your matcher seems to match no currently defined rule or active alert.',
false
);
});
it('should match a rule with no alert', () => {
expectSingleMatch('alertname', 'alert1', 'Matches 1 rule with no active alerts.', false);
});
it('should match a rule and an alert', () => {
expectSingleMatch('alertname', 'alert0', 'Matches 1 rule with 1 active alert.', true);
});
it('should match multiple rules and an alert', () => {
expectSingleMatch('severity', 'someSeverity', 'Matches 2 rules with 1 active alert.', true);
});
it('should match multiple rules and multiple alerts', () => {
expectSingleMatch('job', 'someJob', 'Matches 2 rules with 2 active alerts.', true);
});
it('should return any match if regex is checked', () => {
const match = service.singleMatch(
{
name: 'severity',
value: 'someSeverity',
isRegex: true
},
rules
);
expect(match).toBeFalsy();
});
});
describe('test rule matching with multiple matcher', () => {
const expectMultiMatch = (matchers: any[], helpText: string, successClass: boolean) => {
const match = service.multiMatch(matchers, rules);
expect(match.status).toBe(helpText);
expect(match.cssClass).toBe(successClass ? 'has-success' : 'has-warning');
};
it('should match no rule and no alert', () => {
expectMultiMatch(
[addMatcher('alertname', 'alert0'), addMatcher('job', 'ceph')],
'Your matcher seems to match no currently defined rule or active alert.',
false
);
});
it('should match a rule with no alert', () => {
expectMultiMatch(
[addMatcher('severity', 'someSeverity'), addMatcher('alertname', 'alert1')],
'Matches 1 rule with no active alerts.',
false
);
});
it('should match a rule and an alert', () => {
expectMultiMatch(
[addMatcher('instance', 'someInstance'), addMatcher('alertname', 'alert0')],
'Matches 1 rule with 1 active alert.',
true
);
});
it('should return any match if regex is checked', () => {
const match = service.multiMatch(
[
addMatcher('instance', 'someInstance'),
{
name: 'severity',
value: 'someSeverity',
isRegex: true
}
],
rules
);
expect(match).toBeFalsy();
});
});
});
| 4,134 | 29.858209 | 98 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/prometheus-silence-matcher.service.ts
|
import { Injectable } from '@angular/core';
import _ from 'lodash';
import {
AlertmanagerSilenceMatcher,
AlertmanagerSilenceMatcherMatch
} from '../models/alertmanager-silence';
import { PrometheusRule } from '../models/prometheus-alerts';
@Injectable({
  providedIn: 'root'
})
export class PrometheusSilenceMatcherService {
  // Maps a silence matcher name to the lodash path of the corresponding
  // attribute within a PrometheusRule object.
  private valueAttributePath = {
    alertname: 'name',
    instance: 'alerts.0.labels.instance',
    job: 'alerts.0.labels.job',
    severity: 'labels.severity'
  };

  /** Convenience wrapper: match a single matcher against the given rules. */
  singleMatch(
    matcher: AlertmanagerSilenceMatcher,
    rules: PrometheusRule[]
  ): AlertmanagerSilenceMatcherMatch {
    return this.multiMatch([matcher], rules);
  }

  /**
   * Narrow the rule list by every matcher in turn and describe the result.
   * Returns undefined when any matcher is a regex, as those cannot be
   * resolved against concrete attribute values.
   */
  multiMatch(
    matchers: AlertmanagerSilenceMatcher[],
    rules: PrometheusRule[]
  ): AlertmanagerSilenceMatcherMatch {
    if (matchers.some((m) => m.isRegex)) {
      return undefined;
    }
    const remaining = matchers.reduce(
      (filtered, matcher) => this.getMatchedRules(matcher, filtered),
      rules
    );
    return this.describeMatch(remaining);
  }

  /** All rules whose matcher-addressed attribute equals the matcher value. */
  getMatchedRules(matcher: AlertmanagerSilenceMatcher, rules: PrometheusRule[]): PrometheusRule[] {
    const path = this.getAttributePath(matcher.name);
    return rules.filter((rule) => _.get(rule, path) === matcher.value);
  }

  // Summarize matched rules into status text plus a form-feedback css class.
  private describeMatch(rules: PrometheusRule[]): AlertmanagerSilenceMatcherMatch {
    const alerts = rules.reduce((sum, rule) => sum + rule.alerts.length, 0);
    return {
      status: this.getMatchText(rules.length, alerts),
      cssClass: alerts ? 'has-success' : 'has-warning'
    };
  }

  getAttributePath(name: string): string {
    return this.valueAttributePath[name];
  }

  // Builds the localized "Matches N rules with M active alerts." text.
  private getMatchText(rules: number, alerts: number): string {
    const msg = {
      noRule: $localize`Your matcher seems to match no currently defined rule or active alert.`,
      noAlerts: $localize`no active alerts`,
      alert: $localize`1 active alert`,
      alerts: $localize`${alerts} active alerts`,
      rule: $localize`Matches 1 rule`,
      rules: $localize`Matches ${rules} rules`
    };
    const rule = rules > 1 ? msg.rules : msg.rule;
    const alert = alerts ? (alerts > 1 ? msg.alerts : msg.alert) : msg.noAlerts;
    return rules ? $localize`${rule} with ${alert}.` : msg.noRule;
  }
}
| 2,264 | 28.802632 | 99 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/rbd-configuration.service.spec.ts
|
import { TestBed } from '@angular/core/testing';
import { configureTestBed } from '~/testing/unit-test-helper';
import { RbdConfigurationType } from '../models/configuration';
import { RbdConfigurationService } from './rbd-configuration.service';
describe('RbdConfigurationService', () => {
  let service: RbdConfigurationService;
  configureTestBed({
    providers: [RbdConfigurationService]
  });
  beforeEach(() => {
    service = TestBed.inject(RbdConfigurationService);
  });
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
  it('should filter config options', () => {
    // Looks up a single option out of the statically defined QoS section.
    const result = service.getOptionByName('rbd_qos_write_iops_burst');
    expect(result).toEqual({
      name: 'rbd_qos_write_iops_burst',
      displayName: 'Write IOPS Burst',
      description: 'The desired burst limit of write operations.',
      type: RbdConfigurationType.iops
    });
  });
  it('should return the display name', () => {
    const displayName = service.getDisplayName('rbd_qos_write_iops_burst');
    expect(displayName).toBe('Write IOPS Burst');
  });
  it('should return the description', () => {
    const description = service.getDescription('rbd_qos_write_iops_burst');
    expect(description).toBe('The desired burst limit of write operations.');
  });
  it('should have a class for each section', () => {
    service.sections.forEach((section) => expect(section.class).toBeTruthy());
  });
});
| 1,435 | 30.217391 | 78 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/rbd-configuration.service.ts
|
import { Injectable } from '@angular/core';
import {
RbdConfigurationExtraField,
RbdConfigurationSection,
RbdConfigurationType
} from '../models/configuration';
/**
* Define here which options should be made available under which section heading.
* The display name and description needs to be added manually as long as Ceph does not provide
* this information.
*/
@Injectable({
  providedIn: 'root'
})
export class RbdConfigurationService {
  /** Statically defined configuration sections and their options. */
  readonly sections: RbdConfigurationSection[];

  constructor() {
    this.sections = [
      {
        heading: $localize`Quality of Service`,
        class: 'quality-of-service',
        options: [
          {
            name: 'rbd_qos_bps_limit',
            displayName: $localize`BPS Limit`,
            description: $localize`The desired limit of IO bytes per second.`,
            type: RbdConfigurationType.bps
          },
          {
            name: 'rbd_qos_iops_limit',
            displayName: $localize`IOPS Limit`,
            description: $localize`The desired limit of IO operations per second.`,
            type: RbdConfigurationType.iops
          },
          {
            name: 'rbd_qos_read_bps_limit',
            displayName: $localize`Read BPS Limit`,
            description: $localize`The desired limit of read bytes per second.`,
            type: RbdConfigurationType.bps
          },
          {
            name: 'rbd_qos_read_iops_limit',
            displayName: $localize`Read IOPS Limit`,
            description: $localize`The desired limit of read operations per second.`,
            type: RbdConfigurationType.iops
          },
          {
            name: 'rbd_qos_write_bps_limit',
            displayName: $localize`Write BPS Limit`,
            description: $localize`The desired limit of write bytes per second.`,
            type: RbdConfigurationType.bps
          },
          {
            name: 'rbd_qos_write_iops_limit',
            displayName: $localize`Write IOPS Limit`,
            description: $localize`The desired limit of write operations per second.`,
            type: RbdConfigurationType.iops
          },
          {
            name: 'rbd_qos_bps_burst',
            displayName: $localize`BPS Burst`,
            description: $localize`The desired burst limit of IO bytes.`,
            type: RbdConfigurationType.bps
          },
          {
            name: 'rbd_qos_iops_burst',
            displayName: $localize`IOPS Burst`,
            description: $localize`The desired burst limit of IO operations.`,
            type: RbdConfigurationType.iops
          },
          {
            name: 'rbd_qos_read_bps_burst',
            displayName: $localize`Read BPS Burst`,
            description: $localize`The desired burst limit of read bytes.`,
            type: RbdConfigurationType.bps
          },
          {
            name: 'rbd_qos_read_iops_burst',
            displayName: $localize`Read IOPS Burst`,
            description: $localize`The desired burst limit of read operations.`,
            type: RbdConfigurationType.iops
          },
          {
            name: 'rbd_qos_write_bps_burst',
            displayName: $localize`Write BPS Burst`,
            description: $localize`The desired burst limit of write bytes.`,
            type: RbdConfigurationType.bps
          },
          {
            name: 'rbd_qos_write_iops_burst',
            displayName: $localize`Write IOPS Burst`,
            description: $localize`The desired burst limit of write operations.`,
            type: RbdConfigurationType.iops
          }
        ] as RbdConfigurationExtraField[]
      }
    ];
  }

  /** Flattens all options of the given sections into a single list. */
  private static getOptionsFromSections(sections: RbdConfigurationSection[]) {
    return sections.map((section) => section.options).reduce((a, b) => a.concat(b));
  }

  private filterConfigOptionsByName(configName: string) {
    return RbdConfigurationService.getOptionsFromSections(this.sections).filter(
      (option) => option.name === configName
    );
  }

  // Returns the requested field of the (unique) option with that name,
  // or the default value when the name is unknown.
  private getOptionValueByName(configName: string, fieldName: string, defaultValue = '') {
    const configOptions = this.filterConfigOptionsByName(configName);
    return configOptions.length === 1 ? configOptions.pop()[fieldName] : defaultValue;
  }

  /**
   * Returns the sections restricted to their writable (non read-only) options.
   *
   * Fix: the previous implementation assigned the filtered list back onto
   * `section.options`, mutating the service's canonical `sections` in place,
   * so read-only options were permanently lost after the first call. We now
   * return shallow copies and leave `this.sections` untouched.
   */
  getWritableSections() {
    return this.sections.map((section) => ({
      ...section,
      options: section.options.filter((o) => !o.readOnly)
    }));
  }

  getOptionFields() {
    return RbdConfigurationService.getOptionsFromSections(this.sections);
  }

  getWritableOptionFields() {
    return RbdConfigurationService.getOptionsFromSections(this.getWritableSections());
  }

  getOptionByName(optionName: string): RbdConfigurationExtraField {
    return this.filterConfigOptionsByName(optionName).pop();
  }

  getDisplayName(configName: string): string {
    return this.getOptionValueByName(configName, 'displayName');
  }

  getDescription(configName: string): string {
    return this.getOptionValueByName(configName, 'description');
  }
}
| 5,021 | 33.634483 | 95 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/refresh-interval.service.spec.ts
|
import { NgZone } from '@angular/core';
import { fakeAsync, TestBed, tick } from '@angular/core/testing';
import { configureTestBed } from '~/testing/unit-test-helper';
import { RefreshIntervalService } from './refresh-interval.service';
describe('RefreshIntervalService', () => {
  let service: RefreshIntervalService;
  configureTestBed({
    providers: [RefreshIntervalService]
  });
  beforeEach(() => {
    service = TestBed.inject(RefreshIntervalService);
  });
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
  it('should initial private interval time right', () => {
    // The constructor restores the interval from sessionStorage.
    sessionStorage.setItem('dashboard_interval', '10000');
    const ngZone = TestBed.inject(NgZone);
    service = new RefreshIntervalService(ngZone);
    expect(service.getRefreshInterval()).toBe(10000);
  });
  describe('setRefreshInterval', () => {
    let notifyCount: number;
    it('should send notification to component at correct interval time when interval changed', fakeAsync(() => {
      service.intervalData$.subscribe(() => {
        notifyCount++;
      });
      // Reset after subscribe: BehaviorSubject emits its current value
      // immediately on subscription.
      notifyCount = 0;
      service.setRefreshInterval(10000);
      tick(10000);
      expect(service.getRefreshInterval()).toBe(10000);
      expect(notifyCount).toBe(1);
      notifyCount = 0;
      service.setRefreshInterval(30000);
      tick(30000);
      expect(service.getRefreshInterval()).toBe(30000);
      expect(notifyCount).toBe(1);
      service.ngOnDestroy();
    }));
  });
});
| 1,482 | 26.981132 | 112 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/refresh-interval.service.ts
|
import { Injectable, NgZone, OnDestroy } from '@angular/core';
import { BehaviorSubject, interval, Subscription } from 'rxjs';
@Injectable({
  providedIn: 'root'
})
export class RefreshIntervalService implements OnDestroy {
  // Currently active refresh interval in milliseconds.
  private intervalTime: number;

  // Observable sources
  private intervalDataSource = new BehaviorSubject(null);
  private intervalSubscription: Subscription;

  // Observable streams
  intervalData$ = this.intervalDataSource.asObservable();

  constructor(private ngZone: NgZone) {
    // Restore the interval chosen in this session, defaulting to 5s.
    const stored = parseInt(sessionStorage.getItem('dashboard_interval'), 10);
    this.setRefreshInterval(stored || 5000);
  }

  /**
   * Persist and activate a new refresh interval. Subscribers of
   * `intervalData$` are notified once per tick of the new interval.
   */
  setRefreshInterval(newInterval: number) {
    this.intervalTime = newInterval;
    sessionStorage.setItem('dashboard_interval', String(newInterval));
    if (this.intervalSubscription) {
      this.intervalSubscription.unsubscribe();
    }
    // Run the timer outside Angular so it does not keep the zone busy;
    // re-enter the zone only to notify subscribers.
    this.ngZone.runOutsideAngular(() => {
      this.intervalSubscription = interval(this.intervalTime).subscribe(() =>
        this.ngZone.run(() => this.intervalDataSource.next(this.intervalTime))
      );
    });
  }

  getRefreshInterval() {
    return this.intervalTime;
  }

  ngOnDestroy() {
    if (this.intervalSubscription) {
      this.intervalSubscription.unsubscribe();
    }
  }
}
| 1,320 | 27.106383 | 95 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/summary.service.spec.ts
|
import { HttpClient } from '@angular/common/http';
import { fakeAsync, TestBed, tick } from '@angular/core/testing';
import { RouterTestingModule } from '@angular/router/testing';
import { of as observableOf, Subscriber, Subscription } from 'rxjs';
import { configureTestBed } from '~/testing/unit-test-helper';
import { ExecutingTask } from '../models/executing-task';
import { Summary } from '../models/summary.model';
import { AuthStorageService } from './auth-storage.service';
import { SummaryService } from './summary.service';
describe('SummaryService', () => {
  let summaryService: SummaryService;
  let authStorageService: AuthStorageService;
  let subs: Subscription;
  // Canned payload returned by the stubbed HttpClient below.
  const summary: Summary = {
    executing_tasks: [],
    health_status: 'HEALTH_OK',
    mgr_id: 'x',
    rbd_mirroring: { errors: 0, warnings: 0 },
    rbd_pools: [],
    have_mon_connection: true,
    finished_tasks: [],
    filesystems: [{ id: 1, name: 'cephfs_a' }]
  };
  const httpClientSpy = {
    get: () => observableOf(summary)
  };
  // Pushes a value straight into the service's private BehaviorSubject.
  const nextSummary = (newData: any) => summaryService['summaryDataSource'].next(newData);
  configureTestBed({
    imports: [RouterTestingModule],
    providers: [
      SummaryService,
      AuthStorageService,
      { provide: HttpClient, useValue: httpClientSpy }
    ]
  });
  beforeEach(() => {
    summaryService = TestBed.inject(SummaryService);
    authStorageService = TestBed.inject(AuthStorageService);
  });
  it('should be created', () => {
    expect(summaryService).toBeTruthy();
  });
  it('should call refresh', fakeAsync(() => {
    authStorageService.set('foobar', undefined, undefined);
    const calledWith: any[] = [];
    subs = new Subscription();
    subs.add(summaryService.startPolling());
    tick();
    subs.add(
      summaryService.subscribe((data) => {
        calledWith.push(data);
      })
    );
    expect(calledWith).toEqual([summary]);
    subs.add(summaryService.refresh());
    expect(calledWith).toEqual([summary, summary]);
    // Two more emissions are expected after two polling intervals.
    tick(summaryService.REFRESH_INTERVAL * 2);
    expect(calledWith.length).toEqual(4);
    subs.unsubscribe();
  }));
  describe('Should test subscribe without initial value', () => {
    let result: Summary;
    let i: number;
    const callback = (response: Summary) => {
      i++;
      result = response;
    };
    beforeEach(() => {
      i = 0;
      result = undefined;
      nextSummary(undefined);
    });
    it('should call subscribeOnce', () => {
      const subscriber = summaryService.subscribeOnce(callback);
      expect(subscriber).toEqual(jasmine.any(Subscriber));
      expect(i).toBe(0);
      expect(result).toEqual(undefined);
      // Undefined values are filtered out and must not reach the callback.
      nextSummary(undefined);
      expect(i).toBe(0);
      expect(result).toEqual(undefined);
      expect(subscriber.closed).toBe(false);
      nextSummary(summary);
      expect(result).toEqual(summary);
      expect(i).toBe(1);
      // subscribeOnce completes after the first defined value.
      expect(subscriber.closed).toBe(true);
      nextSummary(summary);
      expect(result).toEqual(summary);
      expect(i).toBe(1);
    });
    it('should call subscribe', () => {
      const subscriber = summaryService.subscribe(callback);
      expect(subscriber).toEqual(jasmine.any(Subscriber));
      expect(i).toBe(0);
      expect(result).toEqual(undefined);
      nextSummary(undefined);
      expect(i).toBe(0);
      expect(result).toEqual(undefined);
      expect(subscriber.closed).toBe(false);
      nextSummary(summary);
      expect(result).toEqual(summary);
      expect(i).toBe(1);
      expect(subscriber.closed).toBe(false);
      // Unlike subscribeOnce, the subscription stays open for later values.
      nextSummary(summary);
      expect(result).toEqual(summary);
      expect(i).toBe(2);
      expect(subscriber.closed).toBe(false);
    });
  });
  describe('Should test methods after first refresh', () => {
    beforeEach(() => {
      authStorageService.set('foobar', undefined, undefined);
      summaryService.refresh();
    });
    it('should call addRunningTask', () => {
      summaryService.addRunningTask(
        new ExecutingTask('rbd/delete', {
          pool_name: 'somePool',
          image_name: 'someImage'
        })
      );
      let result: any;
      summaryService.subscribeOnce((response) => {
        result = response;
      });
      expect(result.executing_tasks.length).toBe(1);
      expect(result.executing_tasks[0]).toEqual({
        metadata: { image_name: 'someImage', pool_name: 'somePool' },
        name: 'rbd/delete'
      });
    });
    it('should call addRunningTask with duplicate task', () => {
      let result: any;
      summaryService.subscribe((response) => {
        result = response;
      });
      const exec_task = new ExecutingTask('rbd/delete', {
        pool_name: 'somePool',
        image_name: 'someImage'
      });
      result.executing_tasks = [exec_task];
      nextSummary(result);
      expect(result.executing_tasks.length).toBe(1);
      // Adding the identical task again must not create a duplicate entry.
      summaryService.addRunningTask(exec_task);
      expect(result.executing_tasks.length).toBe(1);
    });
  });
});
| 4,982 | 26.683333 | 90 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/summary.service.ts
|
import { HttpClient } from '@angular/common/http';
import { Injectable } from '@angular/core';
import _ from 'lodash';
import { BehaviorSubject, Observable, Subscription } from 'rxjs';
import { filter, first } from 'rxjs/operators';
import { ExecutingTask } from '../models/executing-task';
import { Summary } from '../models/summary.model';
import { TimerService } from './timer.service';
@Injectable({
  providedIn: 'root'
})
export class SummaryService {
  readonly REFRESH_INTERVAL = 5000;

  // Observable sources
  private summaryDataSource = new BehaviorSubject<Summary>(null);

  // Observable streams
  summaryData$ = this.summaryDataSource.asObservable();

  constructor(private http: HttpClient, private timerService: TimerService) {}

  /** Start periodic polling of the summary endpoint. */
  startPolling(): Subscription {
    return this.timerService
      .get(() => this.retrieveSummaryObservable(), this.REFRESH_INTERVAL)
      .subscribe(this.retrieveSummaryObserver());
  }

  /** Fetch the summary once and push the result to all subscribers. */
  refresh(): Subscription {
    return this.retrieveSummaryObservable().subscribe(this.retrieveSummaryObserver());
  }

  private retrieveSummaryObservable(): Observable<Summary> {
    return this.http.get<Summary>('api/summary');
  }

  private retrieveSummaryObserver(): (data: Summary) => void {
    return (data: Summary) => this.summaryDataSource.next(data);
  }

  /**
   * Subscribes to the summaryData and receive only the first, non undefined, value.
   */
  subscribeOnce(next: (summary: Summary) => void, error?: (error: any) => void): Subscription {
    const firstDefined$ = this.summaryData$.pipe(
      filter((value) => !!value),
      first()
    );
    return firstDefined$.subscribe(next, error);
  }

  /**
   * Subscribes to the summaryData,
   * which is updated periodically or when a new task is created.
   * Will receive only non undefined values.
   */
  subscribe(next: (summary: Summary) => void, error?: (error: any) => void): Subscription {
    return this.summaryData$.pipe(filter((value) => !!value)).subscribe(next, error);
  }

  /**
   * Inserts a newly created task to the local list of executing tasks.
   * After that, it will automatically push that new information
   * to all subscribers.
   */
  addRunningTask(task: ExecutingTask) {
    const current = this.summaryDataSource.getValue();
    if (!current) {
      return;
    }
    if (Array.isArray(current.executing_tasks)) {
      // Deduplicate: identical name + metadata means the task is tracked already.
      const alreadyTracked = current.executing_tasks.some(
        (element: any) => element.name === task.name && _.isEqual(element.metadata, task.metadata)
      );
      if (!alreadyTracked) {
        current.executing_tasks.push(task);
      }
    } else {
      current.executing_tasks = [task];
    }
    this.summaryDataSource.next(current);
  }
}
| 2,702 | 29.033333 | 95 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-list.service.spec.ts
|
import { HttpClientTestingModule } from '@angular/common/http/testing';
import { TestBed } from '@angular/core/testing';
import { RouterTestingModule } from '@angular/router/testing';
import { of } from 'rxjs';
import { configureTestBed, expectItemTasks } from '~/testing/unit-test-helper';
import { RbdService } from '../api/rbd.service';
import { ExecutingTask } from '../models/executing-task';
import { SummaryService } from './summary.service';
import { TaskListService } from './task-list.service';
import { TaskMessageService } from './task-message.service';
describe('TaskListService', () => {
  let service: TaskListService;
  let summaryService: SummaryService;
  let taskMessageService: TaskMessageService;
  let list: any[];
  let apiResp: any;
  let tasks: any[];
  // Adds an item to the fake API response that init() will fetch.
  const addItem = (name: string) => {
    apiResp.push({ name: name });
  };
  configureTestBed({
    providers: [TaskListService, TaskMessageService, SummaryService, RbdService],
    imports: [HttpClientTestingModule, RouterTestingModule]
  });
  beforeEach(() => {
    service = TestBed.inject(TaskListService);
    summaryService = TestBed.inject(SummaryService);
    taskMessageService = TestBed.inject(TaskMessageService);
    summaryService['summaryDataSource'].next({ executing_tasks: [] });
    // Reuse the rbd task messages for the synthetic 'test/*' task names.
    taskMessageService.messages['test/create'] = taskMessageService.messages['rbd/create'];
    taskMessageService.messages['test/edit'] = taskMessageService.messages['rbd/edit'];
    taskMessageService.messages['test/delete'] = taskMessageService.messages['rbd/delete'];
    tasks = [];
    apiResp = [];
    list = [];
    addItem('a');
    addItem('b');
    addItem('c');
    service.init(
      () => of(apiResp),
      undefined,
      (updatedList) => (list = updatedList),
      () => true,
      (task) => task.name.startsWith('test'),
      (item, task) => item.name === task.metadata['name'],
      {
        default: (metadata: object) => ({ name: metadata['name'] })
      }
    );
  });
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
  // Registers an executing task, which triggers a summary update and
  // therefore a re-fetch of the item list.
  const addTask = (name: string, itemName: string, progress?: number) => {
    const task = new ExecutingTask();
    task.name = name;
    task.progress = progress;
    task.metadata = { name: itemName };
    tasks.push(task);
    summaryService.addRunningTask(task);
  };
  it('gets all items without any executing items', () => {
    expect(list.length).toBe(3);
    expect(list.every((item) => !item.cdExecuting)).toBeTruthy();
  });
  it('gets an item from a task during creation', () => {
    // A create task for an unknown item adds a placeholder to the list.
    addTask('test/create', 'd');
    expect(list.length).toBe(4);
    expectItemTasks(list[3], 'Creating');
  });
  it('shows progress of current task if any above 0', () => {
    addTask('test/edit', 'd', 97);
    addTask('test/edit', 'e', 0);
    expect(list.length).toBe(5);
    expectItemTasks(list[3], 'Updating', 97);
    expectItemTasks(list[4], 'Updating');
  });
  it('gets all items with one executing items', () => {
    addTask('test/create', 'a');
    expect(list.length).toBe(3);
    expectItemTasks(list[0], 'Creating');
    expectItemTasks(list[1], undefined);
    expectItemTasks(list[2], undefined);
  });
  it('gets all items with multiple executing items', () => {
    addTask('test/create', 'a');
    addTask('test/edit', 'a');
    addTask('test/delete', 'a');
    addTask('test/edit', 'b');
    addTask('test/delete', 'b');
    addTask('test/delete', 'c');
    expect(list.length).toBe(3);
    expectItemTasks(list[0], 'Creating..., Updating..., Deleting');
    expectItemTasks(list[1], 'Updating..., Deleting');
    expectItemTasks(list[2], 'Deleting');
  });
  it('gets all items with multiple executing tasks (not only item tasks', () => {
    // Only tasks accepted by the taskFilter ('test/*') are shown.
    addTask('rbd/create', 'a');
    addTask('rbd/edit', 'a');
    addTask('test/delete', 'a');
    addTask('test/edit', 'b');
    addTask('rbd/delete', 'b');
    addTask('rbd/delete', 'c');
    expect(list.length).toBe(3);
    expectItemTasks(list[0], 'Deleting');
    expectItemTasks(list[1], 'Updating');
    expectItemTasks(list[2], undefined);
  });
  it('should call ngOnDestroy', () => {
    expect(service.summaryDataSubscription.closed).toBeFalsy();
    service.ngOnDestroy();
    expect(service.summaryDataSubscription.closed).toBeTruthy();
  });
});
| 4,288 | 31.007463 | 91 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-list.service.ts
|
import { Injectable, OnDestroy } from '@angular/core';
import { Observable, Subscription } from 'rxjs';
import { ExecutingTask } from '../models/executing-task';
import { Summary } from '../models/summary.model';
import { SummaryService } from './summary.service';
import { TaskMessageService } from './task-message.service';
@Injectable()
export class TaskListService implements OnDestroy {
  // Subscription to the periodic summary updates; closed in ngOnDestroy().
  summaryDataSubscription: Subscription;

  getUpdate: (context?: any) => Observable<object>;
  preProcessing: (_: any) => any[];
  setList: (_: any[]) => void;
  onFetchError: (error: any) => void;
  taskFilter: (task: ExecutingTask) => boolean;
  itemFilter: (item: any, task: ExecutingTask) => boolean;
  builders: object;
  summary: Summary;

  constructor(
    private taskMessageService: TaskMessageService,
    private summaryService: SummaryService
  ) {}

  /**
   * @param {() => Observable<object>} getUpdate Method that calls the api and
   * returns that without subscribing.
   * @param {(_: any) => any[]} preProcessing Method executed before merging
   * Tasks with Items
   * @param {(_: any[]) => void} setList Method used to update array of item in the component.
   * @param {(error: any) => void} onFetchError Method called when there were
   * problems while fetching data.
   * @param {(task: ExecutingTask) => boolean} taskFilter callback used in tasks_array.filter()
   * @param {(item, task: ExecutingTask) => boolean} itemFilter callback used in
   * items_array.filter()
   * @param {object} builders
   * object with builders for each type of task.
   * You can also use a 'default' one.
   * @memberof TaskListService
   */
  init(
    getUpdate: (context?: any) => Observable<object>,
    preProcessing: (_: any) => any[],
    setList: (_: any[]) => void,
    onFetchError: (error: any) => void,
    taskFilter: (task: ExecutingTask) => boolean,
    itemFilter: (item: any, task: ExecutingTask) => boolean,
    builders: object
  ) {
    this.getUpdate = getUpdate;
    this.preProcessing = preProcessing;
    this.setList = setList;
    this.onFetchError = onFetchError;
    this.taskFilter = taskFilter;
    this.itemFilter = itemFilter;
    this.builders = builders || {};
    // Re-fetch the item list whenever a new summary (and thus a possibly
    // changed set of executing tasks) arrives.
    this.summaryDataSubscription = this.summaryService.subscribe((summary) => {
      this.summary = summary;
      this.fetch();
    }, this.onFetchError);
  }

  fetch(context: any = null) {
    this.getUpdate(context).subscribe((resp: any) => {
      // Fix: fall back to an empty task list when no summary has arrived yet
      // (e.g. fetch() called by a component before the first summary update).
      // Previously `undefined` was passed on and `tasks.filter(...)` threw
      // inside updateData().
      const tasks = this.summary?.executing_tasks?.filter(this.taskFilter) ?? [];
      this.updateData(resp, tasks);
    }, this.onFetchError);
  }

  // Merge the fetched items with the currently executing tasks and publish
  // the result via setList().
  private updateData(resp: any, tasks: ExecutingTask[]) {
    const data: any[] = this.preProcessing ? this.preProcessing(resp) : resp;
    this.addMissing(data, tasks);
    data.forEach((item) => {
      const executingTasks = tasks.filter((task) => this.itemFilter(item, task));
      item.cdExecuting = this.getTaskAction(executingTasks);
    });
    this.setList(data);
  }

  // Add placeholder items (via the configured builders) for tasks whose
  // item does not exist yet, e.g. an item that is still being created.
  private addMissing(data: any[], tasks: ExecutingTask[]) {
    const defaultBuilder = this.builders['default'];
    tasks?.forEach((task) => {
      const existing = data.find((item) => this.itemFilter(item, task));
      const builder = this.builders[task.name];
      if (!existing && (builder || defaultBuilder)) {
        data.push(builder ? builder(task.metadata) : defaultBuilder(task.metadata));
      }
    });
  }

  // Build the "Creating..., Updating... 50%" style text for an item, or
  // undefined when no task is running for it.
  private getTaskAction(tasks: ExecutingTask[]): string {
    if (tasks.length === 0) {
      return undefined;
    }
    return tasks
      .map((task) => {
        const progress = task.progress ? ` ${task.progress}%` : '';
        return this.taskMessageService.getRunningText(task) + '...' + progress;
      })
      .join(', ');
  }

  ngOnDestroy() {
    if (this.summaryDataSubscription) {
      this.summaryDataSubscription.unsubscribe();
    }
  }
}
| 3,835 | 33.25 | 95 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-manager.service.spec.ts
|
import { fakeAsync, TestBed, tick } from '@angular/core/testing';
import _ from 'lodash';
import { BehaviorSubject } from 'rxjs';
import { configureTestBed } from '~/testing/unit-test-helper';
import { SummaryService } from './summary.service';
import { TaskManagerService } from './task-manager.service';
// Canned summary payload replayed by SummaryServiceMock below. Note that the
// 'finished_tasks' entry matches the 'foo' subscription created in beforeEach,
// and that individual tests mutate this object (see _.assign further down)
// before triggering refresh().
const summary: Record<string, any> = {
  executing_tasks: [],
  health_status: 'HEALTH_OK',
  mgr_id: 'x',
  rbd_mirroring: { errors: 0, warnings: 0 },
  rbd_pools: [],
  have_mon_connection: true,
  finished_tasks: [{ name: 'foo', metadata: {} }],
  filesystems: [{ id: 1, name: 'cephfs_a' }]
};
// Minimal stand-in for SummaryService: exposes the module-level `summary`
// object through a BehaviorSubject so tests can push updates on demand.
export class SummaryServiceMock {
  summaryDataSource = new BehaviorSubject(summary);
  summaryData$ = this.summaryDataSource.asObservable();
  // Re-emits the (possibly mutated) `summary` object to all subscribers.
  refresh() {
    this.summaryDataSource.next(summary);
  }
  // Mirrors SummaryService.subscribe(); returns the rxjs Subscription.
  subscribe(call: any) {
    return this.summaryData$.subscribe(call);
  }
}
describe('TaskManagerService', () => {
  let taskManagerService: TaskManagerService;
  let summaryService: any;
  // Set to true by the 'foo' subscription callback once its task finishes.
  let called: boolean;
  configureTestBed({
    providers: [TaskManagerService, { provide: SummaryService, useClass: SummaryServiceMock }]
  });
  beforeEach(() => {
    taskManagerService = TestBed.inject(TaskManagerService);
    summaryService = TestBed.inject(SummaryService);
    called = false;
    // Register a subscription for the 'foo' task present in the summary fixture.
    taskManagerService.subscribe('foo', {}, () => (called = true));
  });
  it('should be created', () => {
    expect(taskManagerService).toBeTruthy();
  });
  it('should subscribe and be notified when task is finished', fakeAsync(() => {
    expect(taskManagerService.subscriptions.length).toBe(1);
    summaryService.refresh();
    tick();
    // init() wires the service to the summary stream; since 'foo' is in
    // finished_tasks and not in executing_tasks, the callback fires and the
    // subscription is dropped.
    taskManagerService.init(summaryService);
    expect(called).toEqual(true);
    expect(taskManagerService.subscriptions).toEqual([]);
  }));
  it('should subscribe and process executing taks', fakeAsync(() => {
    const original_subscriptions = _.cloneDeep(taskManagerService.subscriptions);
    // Move 'foo' from finished to executing: the subscription must be kept.
    _.assign(summary, {
      executing_tasks: [{ name: 'foo', metadata: {} }],
      finished_tasks: []
    });
    summaryService.refresh();
    tick();
    expect(taskManagerService.subscriptions).toEqual(original_subscriptions);
  }));
});
| 2,188 | 28.986301 | 94 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-manager.service.ts
|
import { Injectable } from '@angular/core';
import _ from 'lodash';
import { ExecutingTask } from '../models/executing-task';
import { FinishedTask } from '../models/finished-task';
import { Task } from '../models/task';
import { SummaryService } from './summary.service';
/**
 * Pairs a task identity (name + metadata) with the callback to invoke once
 * the matching task has finished.
 */
class TaskSubscription {
  constructor(
    public name: string,
    public metadata: object,
    public onTaskFinished: (finishedTask: FinishedTask) => any
  ) {}
}
@Injectable({
  providedIn: 'root'
})
export class TaskManagerService {
  // Subscriptions still waiting for their task to finish.
  subscriptions: Array<TaskSubscription> = [];

  /**
   * Hooks the service into the summary stream. On every summary update,
   * subscribers whose task appears in `finished_tasks` (and no longer in
   * `executing_tasks`) are notified and dropped; all others are kept.
   *
   * @returns the summary subscription so the caller can unsubscribe.
   */
  init(summaryService: SummaryService) {
    return summaryService.subscribe((summary) => {
      const executingTasks = summary.executing_tasks;
      const finishedTasks = summary.finished_tasks;
      const newSubscriptions: Array<TaskSubscription> = [];
      for (const subscription of this.subscriptions) {
        const finishedTask = <FinishedTask>this._getTask(subscription, finishedTasks);
        const executingTask = <ExecutingTask>this._getTask(subscription, executingTasks);
        if (finishedTask !== null && executingTask === null) {
          subscription.onTaskFinished(finishedTask);
        }
        if (executingTask !== null) {
          newSubscriptions.push(subscription);
        }
      }
      // Fix: this assignment used to live inside the loop above, re-assigning
      // the list once per subscription. Hoisted out so it runs exactly once.
      this.subscriptions = newSubscriptions;
    });
  }

  /**
   * Registers a callback to be invoked when the task identified by
   * `name` + `metadata` (deep-compared) finishes.
   */
  subscribe(name: string, metadata: object, onTaskFinished: (finishedTask: FinishedTask) => any) {
    this.subscriptions.push(new TaskSubscription(name, metadata, onTaskFinished));
  }

  // Finds the task matching the subscription's name and (deep-equal) metadata,
  // or null when no such task exists.
  private _getTask(subscription: TaskSubscription, tasks: Array<Task>): Task {
    return (
      tasks.find(
        (task) => task.name === subscription.name && _.isEqual(task.metadata, subscription.metadata)
      ) ?? null
    );
  }
}
| 1,914 | 30.916667 | 98 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-message.service.spec.ts
|
import { HttpClientTestingModule } from '@angular/common/http/testing';
import { TestBed } from '@angular/core/testing';
import _ from 'lodash';
import { configureTestBed } from '~/testing/unit-test-helper';
import { RbdService } from '../api/rbd.service';
import { FinishedTask } from '../models/finished-task';
import { TaskException } from '../models/task-exception';
import { TaskMessageOperation, TaskMessageService } from './task-message.service';
describe('TaskManagerMessageService', () => {
let service: TaskMessageService;
let finishedTask: FinishedTask;
configureTestBed({
providers: [TaskMessageService, RbdService],
imports: [HttpClientTestingModule]
});
beforeEach(() => {
service = TestBed.inject(TaskMessageService);
finishedTask = new FinishedTask();
finishedTask.duration = 30;
});
it('should be created', () => {
expect(service).toBeTruthy();
});
it('should get default description', () => {
expect(service.getErrorTitle(finishedTask)).toBe('Failed to execute unknown task');
});
it('should get default running message', () => {
expect(service.getRunningTitle(finishedTask)).toBe('Executing unknown task');
});
it('should get default running message with a set component', () => {
finishedTask.metadata = { component: 'rbd' };
expect(service.getRunningTitle(finishedTask)).toBe('Executing RBD');
});
it('should getSuccessMessage', () => {
expect(service.getSuccessTitle(finishedTask)).toBe('Executed unknown task');
});
describe('defined tasks messages', () => {
let defaultMsg: string;
const testMessages = (operation: TaskMessageOperation, involves: string) => {
expect(service.getRunningTitle(finishedTask)).toBe(operation.running + ' ' + involves);
expect(service.getErrorTitle(finishedTask)).toBe(
'Failed to ' + operation.failure + ' ' + involves
);
expect(service.getSuccessTitle(finishedTask)).toBe(`${operation.success} ${involves}`);
};
const testCreate = (involves: string) => {
testMessages(new TaskMessageOperation('Creating', 'create', 'Created'), involves);
};
const testUpdate = (involves: string) => {
testMessages(new TaskMessageOperation('Updating', 'update', 'Updated'), involves);
};
const testDelete = (involves: string) => {
testMessages(new TaskMessageOperation('Deleting', 'delete', 'Deleted'), involves);
};
const testImport = (involves: string) => {
testMessages(new TaskMessageOperation('Importing', 'import', 'Imported'), involves);
};
const testErrorCode = (code: number, msg: string) => {
finishedTask.exception = _.assign(new TaskException(), {
code: code
});
expect(service.getErrorMessage(finishedTask)).toBe(msg);
};
describe('pool tasks', () => {
beforeEach(() => {
const metadata = {
pool_name: 'somePool'
};
defaultMsg = `pool '${metadata.pool_name}'`;
finishedTask.metadata = metadata;
});
it('tests pool/create messages', () => {
finishedTask.name = 'pool/create';
testCreate(defaultMsg);
testErrorCode(17, `Name is already used by ${defaultMsg}.`);
});
it('tests pool/edit messages', () => {
finishedTask.name = 'pool/edit';
testUpdate(defaultMsg);
testErrorCode(17, `Name is already used by ${defaultMsg}.`);
});
it('tests pool/delete messages', () => {
finishedTask.name = 'pool/delete';
testDelete(defaultMsg);
});
});
describe('erasure code profile tasks', () => {
beforeEach(() => {
const metadata = {
name: 'someEcpName'
};
defaultMsg = `erasure code profile '${metadata.name}'`;
finishedTask.metadata = metadata;
});
it('tests ecp/create messages', () => {
finishedTask.name = 'ecp/create';
testCreate(defaultMsg);
testErrorCode(17, `Name is already used by ${defaultMsg}.`);
});
it('tests ecp/delete messages', () => {
finishedTask.name = 'ecp/delete';
testDelete(defaultMsg);
});
});
describe('crush rule tasks', () => {
beforeEach(() => {
const metadata = {
name: 'someRuleName'
};
defaultMsg = `crush rule '${metadata.name}'`;
finishedTask.metadata = metadata;
});
it('tests crushRule/create messages', () => {
finishedTask.name = 'crushRule/create';
testCreate(defaultMsg);
testErrorCode(17, `Name is already used by ${defaultMsg}.`);
});
it('tests crushRule/delete messages', () => {
finishedTask.name = 'crushRule/delete';
testDelete(defaultMsg);
});
});
describe('rbd tasks', () => {
let metadata: Record<string, any>;
let childMsg: string;
let destinationMsg: string;
let snapMsg: string;
beforeEach(() => {
metadata = {
pool_name: 'somePool',
image_name: 'someImage',
image_id: '12345',
image_spec: 'somePool/someImage',
image_id_spec: 'somePool/12345',
snapshot_name: 'someSnapShot',
dest_pool_name: 'someDestinationPool',
dest_image_name: 'someDestinationImage',
child_pool_name: 'someChildPool',
child_image_name: 'someChildImage',
new_image_name: 'someImage2'
};
defaultMsg = `RBD '${metadata.pool_name}/${metadata.image_name}'`;
childMsg = `RBD '${metadata.child_pool_name}/${metadata.child_image_name}'`;
destinationMsg = `RBD '${metadata.dest_pool_name}/${metadata.dest_image_name}'`;
snapMsg = `RBD snapshot '${metadata.pool_name}/${metadata.image_name}@${metadata.snapshot_name}'`;
finishedTask.metadata = metadata;
});
it('tests rbd/create messages', () => {
finishedTask.name = 'rbd/create';
testCreate(defaultMsg);
testErrorCode(17, `Name is already used by ${defaultMsg}.`);
});
it('tests rbd/edit messages', () => {
finishedTask.name = 'rbd/edit';
testUpdate(defaultMsg);
testErrorCode(17, `Name is already used by ${defaultMsg}.`);
});
it('tests rbd/delete messages', () => {
finishedTask.name = 'rbd/delete';
testDelete(defaultMsg);
testErrorCode(16, `${defaultMsg} is busy.`);
testErrorCode(39, `${defaultMsg} contains snapshots.`);
});
it('tests rbd/clone messages', () => {
finishedTask.name = 'rbd/clone';
testMessages(new TaskMessageOperation('Cloning', 'clone', 'Cloned'), childMsg);
testErrorCode(17, `Name is already used by ${childMsg}.`);
testErrorCode(22, `Snapshot of ${childMsg} must be protected.`);
});
it('tests rbd/copy messages', () => {
finishedTask.name = 'rbd/copy';
testMessages(new TaskMessageOperation('Copying', 'copy', 'Copied'), destinationMsg);
testErrorCode(17, `Name is already used by ${destinationMsg}.`);
});
it('tests rbd/flatten messages', () => {
finishedTask.name = 'rbd/flatten';
testMessages(new TaskMessageOperation('Flattening', 'flatten', 'Flattened'), defaultMsg);
});
it('tests rbd/snap/create messages', () => {
finishedTask.name = 'rbd/snap/create';
testCreate(snapMsg);
testErrorCode(17, `Name is already used by ${snapMsg}.`);
});
it('tests rbd/snap/edit messages', () => {
finishedTask.name = 'rbd/snap/edit';
testUpdate(snapMsg);
testErrorCode(16, `Cannot unprotect ${snapMsg} because it contains child images.`);
});
it('tests rbd/snap/delete messages', () => {
finishedTask.name = 'rbd/snap/delete';
testDelete(snapMsg);
testErrorCode(16, `Cannot delete ${snapMsg} because it's protected.`);
});
it('tests rbd/snap/rollback messages', () => {
finishedTask.name = 'rbd/snap/rollback';
testMessages(new TaskMessageOperation('Rolling back', 'rollback', 'Rolled back'), snapMsg);
});
it('tests rbd/trash/move messages', () => {
finishedTask.name = 'rbd/trash/move';
testMessages(
new TaskMessageOperation('Moving', 'move', 'Moved'),
`image '${metadata.image_spec}' to trash`
);
testErrorCode(2, `Could not find image.`);
});
it('tests rbd/trash/restore messages', () => {
finishedTask.name = 'rbd/trash/restore';
testMessages(
new TaskMessageOperation('Restoring', 'restore', 'Restored'),
`image '${metadata.image_id_spec}' into '${metadata.new_image_name}'`
);
testErrorCode(17, `Image name '${metadata.new_image_name}' is already in use.`);
});
it('tests rbd/trash/remove messages', () => {
finishedTask.name = 'rbd/trash/remove';
testDelete(`image '${metadata.image_id_spec}'`);
});
it('tests rbd/trash/purge messages', () => {
finishedTask.name = 'rbd/trash/purge';
testMessages(
new TaskMessageOperation('Purging', 'purge', 'Purged'),
`images from '${metadata.pool_name}'`
);
});
});
describe('rbd tasks', () => {
let metadata;
let modeMsg: string;
let peerMsg: string;
beforeEach(() => {
metadata = {
pool_name: 'somePool'
};
modeMsg = `mirror mode for pool '${metadata.pool_name}'`;
peerMsg = `mirror peer for pool '${metadata.pool_name}'`;
finishedTask.metadata = metadata;
});
it('tests rbd/mirroring/site_name/edit messages', () => {
finishedTask.name = 'rbd/mirroring/site_name/edit';
testUpdate('mirroring site name');
});
it('tests rbd/mirroring/bootstrap/create messages', () => {
finishedTask.name = 'rbd/mirroring/bootstrap/create';
testCreate('bootstrap token');
});
it('tests rbd/mirroring/bootstrap/import messages', () => {
finishedTask.name = 'rbd/mirroring/bootstrap/import';
testImport('bootstrap token');
});
it('tests rbd/mirroring/pool/edit messages', () => {
finishedTask.name = 'rbd/mirroring/pool/edit';
testUpdate(modeMsg);
testErrorCode(16, 'Cannot disable mirroring because it contains a peer.');
});
it('tests rbd/mirroring/peer/edit messages', () => {
finishedTask.name = 'rbd/mirroring/peer/edit';
testUpdate(peerMsg);
});
it('tests rbd/mirroring/peer/add messages', () => {
finishedTask.name = 'rbd/mirroring/peer/add';
testCreate(peerMsg);
});
it('tests rbd/mirroring/peer/delete messages', () => {
finishedTask.name = 'rbd/mirroring/peer/delete';
testDelete(peerMsg);
});
});
});
});
| 10,908 | 33.853035 | 106 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-message.service.ts
|
import { Injectable } from '@angular/core';
import _ from 'lodash';
import { Components } from '../enum/components.enum';
import { FinishedTask } from '../models/finished-task';
import { ImageSpec } from '../models/image-spec';
import { Task } from '../models/task';
/**
 * Verb triple describing one operation in its three message tenses:
 * present progressive (`running`), infinitive (`failure`) and past
 * participle (`success`).
 */
export class TaskMessageOperation {
  constructor(public running: string, public failure: string, public success: string) {}
}
/**
 * Bundles an operation verb triple with a callback describing the involved
 * object and an optional map of error-code specific messages.
 */
class TaskMessage {
  // Maps exception codes to human readable messages; defaults to no mapping.
  errors: (metadata: any) => object;

  constructor(
    public operation: TaskMessageOperation,
    public involves: (metadata: any) => string,
    errors?: (metadata: any) => object
  ) {
    this.errors = errors || (() => ({}));
  }

  /** Message shown when the task failed, e.g. "Failed to create pool 'x'". */
  failure(metadata: any): string {
    return $localize`Failed to ${this.operation.failure} ${this.involves(metadata)}`;
  }

  /** Message shown while the task is running, e.g. "Creating pool 'x'". */
  running(metadata: any): string {
    return `${this.operation.running} ${this.involves(metadata)}`;
  }

  /** Message shown when the task succeeded, e.g. "Created pool 'x'". */
  success(metadata: any): string {
    return `${this.operation.success} ${this.involves(metadata)}`;
  }
}
@Injectable({
providedIn: 'root'
})
export class TaskMessageService {
defaultMessage = this.newTaskMessage(
new TaskMessageOperation($localize`Executing`, $localize`execute`, $localize`Executed`),
(metadata) => {
return (
(metadata && (Components[metadata.component] || metadata.component)) ||
$localize`unknown task`
);
},
() => {
return {};
}
);
commonOperations = {
create: new TaskMessageOperation($localize`Creating`, $localize`create`, $localize`Created`),
update: new TaskMessageOperation($localize`Updating`, $localize`update`, $localize`Updated`),
delete: new TaskMessageOperation($localize`Deleting`, $localize`delete`, $localize`Deleted`),
add: new TaskMessageOperation($localize`Adding`, $localize`add`, $localize`Added`),
remove: new TaskMessageOperation($localize`Removing`, $localize`remove`, $localize`Removed`),
import: new TaskMessageOperation($localize`Importing`, $localize`import`, $localize`Imported`)
};
rbd = {
default: (metadata: any) => $localize`RBD '${metadata.image_spec}'`,
create: (metadata: any) => {
const id = new ImageSpec(
metadata.pool_name,
metadata.namespace,
metadata.image_name
).toString();
return $localize`RBD '${id}'`;
},
child: (metadata: any) => {
const id = new ImageSpec(
metadata.child_pool_name,
metadata.child_namespace,
metadata.child_image_name
).toString();
return $localize`RBD '${id}'`;
},
destination: (metadata: any) => {
const id = new ImageSpec(
metadata.dest_pool_name,
metadata.dest_namespace,
metadata.dest_image_name
).toString();
return $localize`RBD '${id}'`;
},
snapshot: (metadata: any) =>
$localize`RBD snapshot '${metadata.image_spec}@${metadata.snapshot_name}'`
};
rbd_mirroring = {
site_name: () => $localize`mirroring site name`,
bootstrap: () => $localize`bootstrap token`,
pool: (metadata: any) => $localize`mirror mode for pool '${metadata.pool_name}'`,
pool_peer: (metadata: any) => $localize`mirror peer for pool '${metadata.pool_name}'`
};
grafana = {
update_dashboards: () => $localize`all dashboards`
};
messages = {
// Host tasks
'host/add': this.newTaskMessage(this.commonOperations.add, (metadata) => this.host(metadata)),
'host/remove': this.newTaskMessage(this.commonOperations.remove, (metadata) =>
this.host(metadata)
),
'host/identify_device': this.newTaskMessage(
new TaskMessageOperation($localize`Identifying`, $localize`identify`, $localize`Identified`),
(metadata) => $localize`device '${metadata.device}' on host '${metadata.hostname}'`
),
// OSD tasks
'osd/create': this.newTaskMessage(
this.commonOperations.create,
(metadata) => $localize`OSDs (DriveGroups: ${metadata.tracking_id})`
),
'osd/delete': this.newTaskMessage(this.commonOperations.delete, (metadata) =>
this.osd(metadata)
),
// Pool tasks
'pool/create': this.newTaskMessage(
this.commonOperations.create,
(metadata) => this.pool(metadata),
(metadata) => ({
'17': $localize`Name is already used by ${this.pool(metadata)}.`
})
),
'pool/edit': this.newTaskMessage(
this.commonOperations.update,
(metadata) => this.pool(metadata),
(metadata) => ({
'17': $localize`Name is already used by ${this.pool(metadata)}.`
})
),
'pool/delete': this.newTaskMessage(this.commonOperations.delete, (metadata) =>
this.pool(metadata)
),
// Erasure code profile tasks
'ecp/create': this.newTaskMessage(
this.commonOperations.create,
(metadata) => this.ecp(metadata),
(metadata) => ({
'17': $localize`Name is already used by ${this.ecp(metadata)}.`
})
),
'ecp/delete': this.newTaskMessage(this.commonOperations.delete, (metadata) =>
this.ecp(metadata)
),
// Crush rule tasks
'crushRule/create': this.newTaskMessage(
this.commonOperations.create,
(metadata) => this.crushRule(metadata),
(metadata) => ({
'17': $localize`Name is already used by ${this.crushRule(metadata)}.`
})
),
'crushRule/delete': this.newTaskMessage(this.commonOperations.delete, (metadata) =>
this.crushRule(metadata)
),
// RBD tasks
'rbd/create': this.newTaskMessage(
this.commonOperations.create,
this.rbd.create,
(metadata) => ({
'17': $localize`Name is already used by ${this.rbd.create(metadata)}.`
})
),
'rbd/edit': this.newTaskMessage(this.commonOperations.update, this.rbd.default, (metadata) => ({
'17': $localize`Name is already used by ${this.rbd.default(metadata)}.`
})),
'rbd/delete': this.newTaskMessage(
this.commonOperations.delete,
this.rbd.default,
(metadata) => ({
'16': $localize`${this.rbd.default(metadata)} is busy.`,
'39': $localize`${this.rbd.default(metadata)} contains snapshots.`
})
),
'rbd/clone': this.newTaskMessage(
new TaskMessageOperation($localize`Cloning`, $localize`clone`, $localize`Cloned`),
this.rbd.child,
(metadata) => ({
'17': $localize`Name is already used by ${this.rbd.child(metadata)}.`,
'22': $localize`Snapshot of ${this.rbd.child(metadata)} must be protected.`
})
),
'rbd/copy': this.newTaskMessage(
new TaskMessageOperation($localize`Copying`, $localize`copy`, $localize`Copied`),
this.rbd.destination,
(metadata) => ({
'17': $localize`Name is already used by ${this.rbd.destination(metadata)}.`
})
),
'rbd/flatten': this.newTaskMessage(
new TaskMessageOperation($localize`Flattening`, $localize`flatten`, $localize`Flattened`),
this.rbd.default
),
// RBD snapshot tasks
'rbd/snap/create': this.newTaskMessage(
this.commonOperations.create,
this.rbd.snapshot,
(metadata) => ({
'17': $localize`Name is already used by ${this.rbd.snapshot(metadata)}.`
})
),
'rbd/snap/edit': this.newTaskMessage(
this.commonOperations.update,
this.rbd.snapshot,
(metadata) => ({
'16': $localize`Cannot unprotect ${this.rbd.snapshot(
metadata
)} because it contains child images.`
})
),
'rbd/snap/delete': this.newTaskMessage(
this.commonOperations.delete,
this.rbd.snapshot,
(metadata) => ({
'16': $localize`Cannot delete ${this.rbd.snapshot(metadata)} because it's protected.`
})
),
'rbd/snap/rollback': this.newTaskMessage(
new TaskMessageOperation(
$localize`Rolling back`,
$localize`rollback`,
$localize`Rolled back`
),
this.rbd.snapshot
),
// RBD trash tasks
'rbd/trash/move': this.newTaskMessage(
new TaskMessageOperation($localize`Moving`, $localize`move`, $localize`Moved`),
(metadata) => $localize`image '${metadata.image_spec}' to trash`,
() => ({
2: $localize`Could not find image.`
})
),
'rbd/trash/restore': this.newTaskMessage(
new TaskMessageOperation($localize`Restoring`, $localize`restore`, $localize`Restored`),
(metadata) => $localize`image '${metadata.image_id_spec}' into '${metadata.new_image_name}'`,
(metadata) => ({
17: $localize`Image name '${metadata.new_image_name}' is already in use.`
})
),
'rbd/trash/remove': this.newTaskMessage(
new TaskMessageOperation($localize`Deleting`, $localize`delete`, $localize`Deleted`),
(metadata) => $localize`image '${metadata.image_id_spec}'`
),
'rbd/trash/purge': this.newTaskMessage(
new TaskMessageOperation($localize`Purging`, $localize`purge`, $localize`Purged`),
(metadata) => {
let message = $localize`all pools`;
if (metadata.pool_name) {
message = `'${metadata.pool_name}'`;
}
return $localize`images from ${message}`;
}
),
// RBD mirroring tasks
'rbd/mirroring/site_name/edit': this.newTaskMessage(
this.commonOperations.update,
this.rbd_mirroring.site_name,
() => ({})
),
'rbd/mirroring/bootstrap/create': this.newTaskMessage(
this.commonOperations.create,
this.rbd_mirroring.bootstrap,
() => ({})
),
'rbd/mirroring/bootstrap/import': this.newTaskMessage(
this.commonOperations.import,
this.rbd_mirroring.bootstrap,
() => ({})
),
'rbd/mirroring/pool/edit': this.newTaskMessage(
this.commonOperations.update,
this.rbd_mirroring.pool,
() => ({
16: $localize`Cannot disable mirroring because it contains a peer.`
})
),
'rbd/mirroring/peer/add': this.newTaskMessage(
this.commonOperations.create,
this.rbd_mirroring.pool_peer,
() => ({})
),
'rbd/mirroring/peer/edit': this.newTaskMessage(
this.commonOperations.update,
this.rbd_mirroring.pool_peer,
() => ({})
),
'rbd/mirroring/peer/delete': this.newTaskMessage(
this.commonOperations.delete,
this.rbd_mirroring.pool_peer,
() => ({})
),
// iSCSI target tasks
'iscsi/target/create': this.newTaskMessage(this.commonOperations.create, (metadata) =>
this.iscsiTarget(metadata)
),
'iscsi/target/edit': this.newTaskMessage(this.commonOperations.update, (metadata) =>
this.iscsiTarget(metadata)
),
'iscsi/target/delete': this.newTaskMessage(this.commonOperations.delete, (metadata) =>
this.iscsiTarget(metadata)
),
'nfs/create': this.newTaskMessage(this.commonOperations.create, (metadata) =>
this.nfs(metadata)
),
'nfs/edit': this.newTaskMessage(this.commonOperations.update, (metadata) => this.nfs(metadata)),
'nfs/delete': this.newTaskMessage(this.commonOperations.delete, (metadata) =>
this.nfs(metadata)
),
// Grafana tasks
'grafana/dashboards/update': this.newTaskMessage(
this.commonOperations.update,
this.grafana.update_dashboards,
() => ({})
),
// Service tasks
'service/create': this.newTaskMessage(this.commonOperations.create, (metadata) =>
this.service(metadata)
),
'service/edit': this.newTaskMessage(this.commonOperations.update, (metadata) =>
this.service(metadata)
),
'service/delete': this.newTaskMessage(this.commonOperations.delete, (metadata) =>
this.service(metadata)
),
'crud-component/create': this.newTaskMessage(this.commonOperations.create, (metadata) =>
this.crudMessage(metadata)
),
'crud-component/edit': this.newTaskMessage(this.commonOperations.update, (metadata) =>
this.crudMessage(metadata)
),
'crud-component/import': this.newTaskMessage(this.commonOperations.import, (metadata) =>
this.crudMessage(metadata)
),
'crud-component/id': this.newTaskMessage(this.commonOperations.delete, (id) =>
this.crudMessageId(id)
)
};
newTaskMessage(
operation: TaskMessageOperation,
involves: (metadata: any) => string,
errors?: (metadata: any) => object
) {
return new TaskMessage(operation, involves, errors);
}
host(metadata: any) {
return $localize`host '${metadata.hostname}'`;
}
osd(metadata: any) {
return $localize`OSD '${metadata.svc_id}'`;
}
pool(metadata: any) {
return $localize`pool '${metadata.pool_name}'`;
}
ecp(metadata: any) {
return $localize`erasure code profile '${metadata.name}'`;
}
crushRule(metadata: any) {
return $localize`crush rule '${metadata.name}'`;
}
iscsiTarget(metadata: any) {
return $localize`target '${metadata.target_iqn}'`;
}
nfs(metadata: any) {
return $localize`NFS '${metadata.cluster_id}\:${
metadata.export_id ? metadata.export_id : metadata.path
}'`;
}
service(metadata: any) {
return $localize`Service '${metadata.service_name}'`;
}
crudMessage(metadata: any) {
let message = metadata.__message;
_.forEach(metadata, (value, key) => {
if (key != '__message') {
let regex = '{' + key + '}';
message = message.replace(regex, value);
}
});
return $localize`${message}`;
}
crudMessageId(id: string) {
return $localize`${id}`;
}
_getTaskTitle(task: Task) {
if (task.name && task.name.startsWith('progress/')) {
// we don't fill the failure string because, at least for now, all
// progress module tasks will be considered successful
return this.newTaskMessage(
new TaskMessageOperation(
task.name.replace('progress/', ''),
'',
task.name.replace('progress/', '')
),
(_metadata) => ''
);
}
return this.messages[task.name] || this.defaultMessage;
}
getSuccessTitle(task: FinishedTask) {
return this._getTaskTitle(task).success(task.metadata);
}
getErrorMessage(task: FinishedTask) {
return (
this._getTaskTitle(task).errors(task.metadata)[task.exception.code] || task.exception.detail
);
}
getErrorTitle(task: Task) {
return this._getTaskTitle(task).failure(task.metadata);
}
getRunningTitle(task: Task) {
return this._getTaskTitle(task).running(task.metadata);
}
getRunningText(task: Task) {
return this._getTaskTitle(task).operation.running;
}
}
| 14,692 | 31.434879 | 100 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-wrapper.service.spec.ts
|
import { HttpClientTestingModule } from '@angular/common/http/testing';
import { inject, TestBed } from '@angular/core/testing';
import { RouterTestingModule } from '@angular/router/testing';
import { ToastrModule } from 'ngx-toastr';
import { Observable } from 'rxjs';
import { configureTestBed } from '~/testing/unit-test-helper';
import { FinishedTask } from '../models/finished-task';
import { SharedModule } from '../shared.module';
import { NotificationService } from './notification.service';
import { SummaryService } from './summary.service';
import { TaskManagerService } from './task-manager.service';
import { TaskWrapperService } from './task-wrapper.service';
describe('TaskWrapperService', () => {
  let service: TaskWrapperService;
  configureTestBed({
    imports: [HttpClientTestingModule, ToastrModule.forRoot(), SharedModule, RouterTestingModule],
    providers: [TaskWrapperService]
  });
  beforeEach(inject([TaskWrapperService], (wrapper: TaskWrapperService) => {
    service = wrapper;
  }));
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
  describe('wrapTaskAroundCall', () => {
    let notify: NotificationService;
    // Set to true by the observed complete/error handler of each test.
    let passed: boolean;
    let summaryService: SummaryService;
    // Fake API call: errors when no status is given, otherwise emits the
    // status (200 simulates a synchronous, 202 an asynchronous task).
    const fakeCall = (status?: number) =>
      new Observable((observer) => {
        if (!status) {
          observer.error({ error: 'failed' });
        }
        observer.next({ status: status });
        observer.complete();
      });
    const callWrapTaskAroundCall = (status: number, name: string) => {
      return service.wrapTaskAroundCall({
        task: new FinishedTask(name, { sth: 'else' }),
        call: fakeCall(status)
      });
    };
    beforeEach(() => {
      passed = false;
      notify = TestBed.inject(NotificationService);
      summaryService = TestBed.inject(SummaryService);
      spyOn(notify, 'show');
      spyOn(notify, 'notifyTask').and.stub();
      spyOn(service, '_handleExecutingTasks').and.callThrough();
      spyOn(summaryService, 'addRunningTask').and.callThrough();
    });
    it('should simulate a synchronous task', () => {
      callWrapTaskAroundCall(200, 'sync').subscribe({ complete: () => (passed = true) });
      expect(service._handleExecutingTasks).not.toHaveBeenCalled();
      expect(passed).toBeTruthy();
      expect(summaryService.addRunningTask).not.toHaveBeenCalled();
    });
    it('should simulate a asynchronous task', () => {
      callWrapTaskAroundCall(202, 'async').subscribe({ complete: () => (passed = true) });
      expect(service._handleExecutingTasks).toHaveBeenCalled();
      expect(passed).toBeTruthy();
      expect(summaryService.addRunningTask).toHaveBeenCalledTimes(1);
    });
    it('should call notifyTask if asynchronous task would have been finished', () => {
      const taskManager = TestBed.inject(TaskManagerService);
      // Let the task-manager subscription resolve immediately, as if the
      // asynchronous task had already finished.
      spyOn(taskManager, 'subscribe').and.callFake((_name, _metadata, onTaskFinished) => {
        onTaskFinished();
      });
      callWrapTaskAroundCall(202, 'async').subscribe({ complete: () => (passed = true) });
      expect(notify.notifyTask).toHaveBeenCalled();
    });
    it('should simulate a task failure', () => {
      callWrapTaskAroundCall(null, 'async').subscribe({ error: () => (passed = true) });
      expect(service._handleExecutingTasks).not.toHaveBeenCalled();
      expect(passed).toBeTruthy();
      expect(summaryService.addRunningTask).not.toHaveBeenCalled();
      /**
       * A notification will be raised by the API interceptor.
       * This resolves this bug https://tracker.ceph.com/issues/25139
       */
      expect(notify.notifyTask).not.toHaveBeenCalled();
    });
  });
});
| 3,677 | 36.151515 | 98 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/task-wrapper.service.ts
|
import { Injectable } from '@angular/core';
import { Observable, Subscriber } from 'rxjs';
import { NotificationType } from '../enum/notification-type.enum';
import { CdNotificationConfig } from '../models/cd-notification';
import { ExecutingTask } from '../models/executing-task';
import { FinishedTask } from '../models/finished-task';
import { NotificationService } from './notification.service';
import { SummaryService } from './summary.service';
import { TaskManagerService } from './task-manager.service';
import { TaskMessageService } from './task-message.service';
@Injectable({
  providedIn: 'root'
})
export class TaskWrapperService {
  constructor(
    private notificationService: NotificationService,
    private summaryService: SummaryService,
    private taskMessageService: TaskMessageService,
    private taskManagerService: TaskMessageService extends never ? never : TaskManagerService
  ) {}

  /**
   * Wraps an API call in a task-aware observable. A 202 response marks the
   * task as asynchronous (a running notification is shown and the task is
   * tracked until finished); any other success refreshes the summary and
   * notifies immediately. Errors are annotated on the task and re-thrown.
   */
  wrapTaskAroundCall({ task, call }: { task: FinishedTask; call: Observable<any> }) {
    return new Observable((observer: Subscriber<any>) => {
      call.subscribe({
        next: (resp) => {
          if (resp.status === 202) {
            this._handleExecutingTasks(task);
          } else {
            this.summaryService.refresh();
            task.success = true;
            this.notificationService.notifyTask(task);
          }
        },
        error: (resp) => {
          task.success = false;
          task.exception = resp.error;
          observer.error(resp);
        },
        complete: () => {
          observer.complete();
        }
      });
    });
  }

  /**
   * Shows a "running" notification for an asynchronous task, registers it as
   * executing in the summary and subscribes to be notified once it finishes.
   */
  _handleExecutingTasks(task: FinishedTask) {
    const runningNotification = new CdNotificationConfig(
      NotificationType.info,
      this.taskMessageService.getRunningTitle(task)
    );
    runningNotification.isFinishedTask = true;
    this.notificationService.show(runningNotification);
    const executingTask = new ExecutingTask(task.name, task.metadata);
    this.summaryService.addRunningTask(executingTask);
    this.taskManagerService.subscribe(
      executingTask.name,
      executingTask.metadata,
      (asyncTask: FinishedTask) => this.notificationService.notifyTask(asyncTask)
    );
  }
}
| 2,140 | 30.028986 | 85 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/telemetry-notification.service.spec.ts
|
import { TestBed } from '@angular/core/testing';
import { configureTestBed } from '~/testing/unit-test-helper';
import { TelemetryNotificationService } from './telemetry-notification.service';
describe('TelemetryNotificationService', () => {
  let service: TelemetryNotificationService;

  configureTestBed({
    providers: [TelemetryNotificationService]
  });

  beforeEach(() => {
    service = TestBed.inject(TelemetryNotificationService);
    spyOn(service.update, 'emit');
  });

  it('should be created', () => {
    expect(service).toBeTruthy();
  });

  // Both states share the same contract: the flag is stored and the new
  // value is re-emitted through the 'update' emitter.
  [true, false].forEach((visibility) => {
    it(`should set notification visibility to ${visibility}`, () => {
      service.setVisibility(visibility);
      expect(service.visible).toBe(visibility);
      expect(service.update.emit).toHaveBeenCalledWith(visibility);
    });
  });
});
| 967 | 27.470588 | 80 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/telemetry-notification.service.ts
|
import { EventEmitter, Injectable, Output } from '@angular/core';
@Injectable({
  providedIn: 'root'
})
export class TelemetryNotificationService {
  // Whether the telemetry notification is currently shown.
  visible = false;
  // NOTE(review): @Output() only has meaning on components/directives; on an
  // injectable service it is a no-op. Consumers subscribe to 'update'
  // directly, so a plain Subject (or EventEmitter without the decorator)
  // would suffice — confirm before removing.
  @Output()
  update: EventEmitter<boolean> = new EventEmitter<boolean>();
  /**
   * Store the visibility flag and broadcast the new value to subscribers.
   */
  setVisibility(visible: boolean) {
    this.visible = visible;
    this.update.emit(visible);
  }
}
| 346 | 19.411765 | 65 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/text-to-download.service.spec.ts
|
import { TestBed } from '@angular/core/testing';
import { configureTestBed } from '~/testing/unit-test-helper';
import { TextToDownloadService } from './text-to-download.service';
describe('TextToDownloadService', () => {
let service: TextToDownloadService;
configureTestBed({
providers: [TextToDownloadService]
});
beforeEach(() => {
service = TestBed.inject(TextToDownloadService);
});
it('should be created', () => {
expect(service).toBeTruthy();
});
});
| 489 | 22.333333 | 67 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/text-to-download.service.ts
|
import { Injectable } from '@angular/core';
import { saveAs } from 'file-saver';
@Injectable({
  providedIn: 'root'
})
export class TextToDownloadService {
  /**
   * Trigger a client-side download of the given text content.
   *
   * @param downloadText The raw text to save.
   * @param filename Optional name for the downloaded file.
   */
  download(downloadText: string, filename?: string) {
    const blob = new Blob([downloadText]);
    saveAs(blob, filename);
  }
}
| 266 | 19.538462 | 53 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/time-diff.service.spec.ts
|
import { TestBed } from '@angular/core/testing';
import { configureTestBed } from '~/testing/unit-test-helper';
import { TimeDiffService } from './time-diff.service';
describe('TimeDiffService', () => {
  let service: TimeDiffService;
  // Reference point shared by several tests (local time).
  const baseTime = new Date('2022-02-22T00:00:00');
  configureTestBed({
    providers: [TimeDiffService]
  });
  beforeEach(() => {
    service = TestBed.inject(TimeDiffService);
  });
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
  it('calculates a new date that happens after the given date', () => {
    expect(service.calculateDate(new Date('2022-02-28T04:05:00'), '2h')).toEqual(
      new Date('2022-02-28T06:05:00')
    );
    expect(service.calculateDate(baseTime, '15m')).toEqual(new Date('2022-02-22T00:15'));
    expect(service.calculateDate(baseTime, '5d 23h')).toEqual(new Date('2022-02-27T23:00'));
  });
  it('calculates a new date that happens before the given date', () => {
    // The 'reverse' flag subtracts the duration instead of adding it.
    expect(service.calculateDate(new Date('2022-02-22T02:00:00'), '2h', true)).toEqual(baseTime);
  });
  it('calculates the difference of two dates', () => {
    expect(
      service.calculateDuration(new Date('2022-02-22T00:45:00'), new Date('2022-02-22T02:00:00'))
    ).toBe('1h 15m');
    expect(service.calculateDuration(baseTime, new Date('2022-02-28T04:05:00'))).toBe('6d 4h 5m');
  });
  // NOTE(review): "less then" in the name is a typo for "less than".
  it('should return an empty string if time diff is less then a minute', () => {
    const ts = 1568361327000;
    expect(service.calculateDuration(new Date(ts), new Date(ts + 120))).toBe('');
  });
  describe('testing duration calculation in detail', () => {
    const minutes = 60 * 1000;
    const hours = 60 * minutes;
    const days = 24 * hours;
    it('should allow different writings', () => {
      // Accesses the private method via index notation on purpose.
      const expectDurationToBeMs = (duration: string, ms: number) =>
        expect(service['getDurationMs'](duration)).toBe(ms);
      expectDurationToBeMs('2h', 2 * hours);
      expectDurationToBeMs('4 Days', 4 * days);
      expectDurationToBeMs('3 minutes', 3 * minutes);
      expectDurationToBeMs('4 Days 2h 3 minutes', 4 * days + 2 * hours + 3 * minutes);
      // Minute overflow ('120m') is carried into hours by getDuration.
      expectDurationToBeMs('5d3h120m', 5 * days + 5 * hours);
    });
    it('should create duration string from ms', () => {
      const expectMsToBeDuration = (ms: number, duration: string) =>
        expect(service['getDuration'](ms)).toBe(duration);
      expectMsToBeDuration(2 * hours, '2h');
      expectMsToBeDuration(4 * days, '4d');
      expectMsToBeDuration(3 * minutes, '3m');
      expectMsToBeDuration(4 * days + 2 * hours + 3 * minutes, '4d 2h 3m');
      expectMsToBeDuration(service['getDurationMs']('5d3h120m'), '5d 5h');
    });
  });
});
| 2,689 | 36.361111 | 98 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/time-diff.service.ts
|
import { Injectable } from '@angular/core';
import _ from 'lodash';
@Injectable({
providedIn: 'root'
})
export class TimeDiffService {
  /**
   * Get the signed duration between two dates in the format '[Nd] [Nh] [Nm]'.
   * A leading '-' is prepended when startDate lies after endDate.
   */
  calculateDuration(startDate: Date, endDate: Date): string {
    const startTime = +startDate;
    const endTime = +endDate;
    const duration = this.getDuration(Math.abs(startTime - endTime));
    if (startTime > endTime) {
      return '-' + duration;
    }
    return duration;
  }

  /**
   * Get the duration in the format '[Nd] [Nh] [Nm]', e.g. '2d 1h 15m'.
   * @param ms The time in milliseconds.
   * @return The duration. An empty string is returned if the duration is
   * less than a minute.
   */
  private getDuration(ms: number): string {
    const date = new Date(ms);
    const h = date.getUTCHours();
    const m = date.getUTCMinutes();
    // Days cannot be read off a Date object; derive them from the raw ms.
    const d = Math.floor(ms / (24 * 3600 * 1000));
    const format = (n: number, s: string) => (n ? n + s : n);
    return [format(d, 'd'), format(h, 'h'), format(m, 'm')].filter((x) => x).join(' ');
  }

  /**
   * Add (or, with `reverse` set, subtract) a textual duration such as
   * '5d 3h 120m' to the given date.
   * @returns The shifted date, or undefined for an invalid input date.
   */
  calculateDate(date: Date, duration: string, reverse?: boolean): Date {
    const time = +date;
    // '+date' always yields a primitive number, so Number.isNaN is
    // sufficient here — no need for lodash's _.isNaN.
    if (Number.isNaN(time)) {
      return undefined;
    }
    const diff = this.getDurationMs(duration) * (reverse ? -1 : 1);
    return new Date(time + diff);
  }

  /** Parse a duration string (day/hour/minute parts) into milliseconds. */
  private getDurationMs(duration: string): number {
    const d = this.getNumbersFromString(duration, 'd');
    const h = this.getNumbersFromString(duration, 'h');
    const m = this.getNumbersFromString(duration, 'm');
    return ((d * 24 + h) * 60 + m) * 60000;
  }

  /** Extract the number preceding the given unit prefix, e.g. 3 for '3h'. */
  private getNumbersFromString(duration: string, prefix: string): number {
    const match = duration.match(new RegExp(`[0-9 ]+${prefix}`, 'i'));
    return match ? parseInt(match[0], 10) : 0;
  }
}
| 1,732 | 29.946429 | 87 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/timer.service.spec.ts
|
import { fakeAsync, TestBed, tick } from '@angular/core/testing';
import { of, Subscription } from 'rxjs';
import { configureTestBed } from '~/testing/unit-test-helper';
import { TimerService } from './timer.service';
describe('TimerService', () => {
  let service: TimerService;
  let subs: Subscription;
  let receivedData: any[];
  // Inner observable factory passed to TimerService.get().
  const next = () => of(true);
  const observer = (data: boolean) => {
    receivedData.push(data);
  };
  configureTestBed({
    providers: [TimerService]
  });
  beforeEach(() => {
    service = TestBed.inject(TimerService);
    receivedData = [];
  });
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
  // NOTE(review): despite the name, this test subscribes first and verifies
  // that no further values arrive AFTER unsubscribing (2 values while
  // subscribed: one immediate due to dueTime=0, one after the interval).
  it('should not emit any value when no subscribers', fakeAsync(() => {
    subs = service.get(next).subscribe(observer);
    tick(service.DEFAULT_REFRESH_INTERVAL);
    expect(receivedData.length).toEqual(2);
    subs.unsubscribe();
    tick(service.DEFAULT_REFRESH_INTERVAL);
    expect(receivedData.length).toEqual(2);
  }));
  // Passing null for refreshInterval/dueTime overrides the defaults.
  it('should emit value with no dueTime and no refresh interval', fakeAsync(() => {
    subs = service.get(next, null, null).subscribe(observer);
    tick(service.DEFAULT_REFRESH_INTERVAL);
    expect(receivedData.length).toEqual(1);
    expect(receivedData).toEqual([true]);
    subs.unsubscribe();
  }));
  it('should emit expected values when refresh interval + no dueTime', fakeAsync(() => {
    subs = service.get(next).subscribe(observer);
    tick(service.DEFAULT_REFRESH_INTERVAL * 2);
    expect(receivedData.length).toEqual(3);
    expect(receivedData).toEqual([true, true, true]);
    subs.unsubscribe();
  }));
  // NOTE(review): the description says "equal to refresh interval" but
  // dueTime is 1000 while the interval is 5000 — consider renaming.
  it('should emit expected values when dueTime equal to refresh interval', fakeAsync(() => {
    const dueTime = 1000;
    subs = service.get(next, service.DEFAULT_REFRESH_INTERVAL, dueTime).subscribe(observer);
    tick(service.DEFAULT_REFRESH_INTERVAL * 2);
    expect(receivedData.length).toEqual(2);
    expect(receivedData).toEqual([true, true]);
    subs.unsubscribe();
  }));
});
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/timer.service.ts
|
import { Injectable } from '@angular/core';
import { Observable, timer } from 'rxjs';
import { observeOn, shareReplay, switchMap } from 'rxjs/operators';
import { whenPageVisible } from '../rxjs/operators/page-visibilty.operator';
import { NgZoneSchedulerService } from './ngzone-scheduler.service';
@Injectable({
  providedIn: 'root'
})
export class TimerService {
  // Default polling period in milliseconds.
  readonly DEFAULT_REFRESH_INTERVAL = 5000;
  // First emission fires immediately by default.
  readonly DEFAULT_DUE_TIME = 0;
  constructor(private ngZone: NgZoneSchedulerService) {}
  /**
   * Build a polling observable: every `refreshInterval` ms (first tick
   * after `dueTime` ms) the `next` factory is invoked and its result is
   * flattened into the stream.
   *
   * Pipeline notes (the operator order is deliberate):
   * - the timer is scheduled on `this.ngZone.leave` and re-entered via
   *   `observeOn(this.ngZone.enter)` — presumably to avoid triggering
   *   Angular change detection while idle; confirm against
   *   NgZoneSchedulerService.
   * - `switchMap` cancels an in-flight inner observable when the next
   *   tick arrives.
   * - `shareReplay({ refCount: true, bufferSize: 1 })` multicasts the
   *   stream and replays the latest value to late subscribers; with
   *   refCount the timer stops once the last subscriber unsubscribes.
   * - `whenPageVisible()` gates emissions on page visibility (see
   *   page-visibilty.operator for the exact semantics).
   */
  get(
    next: () => Observable<any>,
    refreshInterval: number = this.DEFAULT_REFRESH_INTERVAL,
    dueTime: number = this.DEFAULT_DUE_TIME
  ): Observable<any> {
    return timer(dueTime, refreshInterval, this.ngZone.leave).pipe(
      observeOn(this.ngZone.enter),
      switchMap(next),
      shareReplay({ refCount: true, bufferSize: 1 }),
      whenPageVisible()
    );
  }
}
| 890 | 28.7 | 76 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/url-builder.service.spec.ts
|
import { URLVerbs } from '../constants/app.constants';
import { URLBuilderService } from './url-builder.service';
describe('URLBuilderService', () => {
const BASE = 'pool';
const urlBuilder = new URLBuilderService(BASE);
it('get base', () => {
expect(urlBuilder.base).toBe(BASE);
});
it('build absolute URL', () => {
expect(URLBuilderService.buildURL(true, urlBuilder.base, URLVerbs.CREATE)).toBe(
`/${urlBuilder.base}/${URLVerbs.CREATE}`
);
});
it('build relative URL', () => {
expect(URLBuilderService.buildURL(false, urlBuilder.base, URLVerbs.CREATE)).toBe(
`${urlBuilder.base}/${URLVerbs.CREATE}`
);
});
it('get Create URL', () => {
expect(urlBuilder.getCreate()).toBe(`/${urlBuilder.base}/${URLVerbs.CREATE}`);
});
it('get Create From URL', () => {
const id = 'someId';
expect(urlBuilder.getCreateFrom(id)).toBe(`/${urlBuilder.base}/${URLVerbs.CREATE}/${id}`);
});
it('get Edit URL with item', () => {
const item = 'test_pool';
expect(urlBuilder.getEdit(item)).toBe(`/${urlBuilder.base}/${URLVerbs.EDIT}/${item}`);
});
});
| 1,115 | 28.368421 | 94 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/url-builder.service.ts
|
import { Location } from '@angular/common';
import { URLVerbs } from '../constants/app.constants';
export class URLBuilderService {
constructor(readonly base: string) {}
private static concatURLSegments(segments: string[]): string {
return segments.reduce(Location.joinWithSlash);
}
static buildURL(absolute: boolean, ...segments: string[]): string {
return URLBuilderService.concatURLSegments([...(absolute ? ['/'] : []), ...segments]);
}
private getURL(verb: URLVerbs, absolute = true, ...segments: string[]): string {
return URLBuilderService.buildURL(absolute, this.base, verb, ...segments);
}
getCreate(absolute = true): string {
return this.getURL(URLVerbs.CREATE, absolute);
}
getCreateFrom(item: string, absolute = true): string {
return this.getURL(URLVerbs.CREATE, absolute, item);
}
getDelete(absolute = true): string {
return this.getURL(URLVerbs.DELETE, absolute);
}
getEdit(item: string, absolute = true): string {
return this.getURL(URLVerbs.EDIT, absolute, item);
}
getUpdate(item: string, absolute = true): string {
return this.getURL(URLVerbs.UPDATE, absolute, item);
}
getAdd(absolute = true): string {
return this.getURL(URLVerbs.ADD, absolute);
}
getRemove(absolute = true): string {
return this.getURL(URLVerbs.REMOVE, absolute);
}
// Prometheus wording
getRecreate(item: string, absolute = true): string {
return this.getURL(URLVerbs.RECREATE, absolute, item);
}
}
| 1,491 | 28.254902 | 90 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/wizard-steps.service.spec.ts
|
import { TestBed } from '@angular/core/testing';
import { WizardStepsService } from './wizard-steps.service';
describe('WizardStepsService', () => {
  let service: WizardStepsService;
  beforeEach(() => {
    // No providers needed: the service is registered with providedIn 'root'.
    TestBed.configureTestingModule({});
    service = TestBed.inject(WizardStepsService);
  });
  it('should be created', () => {
    expect(service).toBeTruthy();
  });
});
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/app/shared/services/wizard-steps.service.ts
|
import { Injectable } from '@angular/core';
import { BehaviorSubject, Observable } from 'rxjs';
import { WizardStepModel } from '~/app/shared/models/wizard-steps';
// Single-entry step list used until setTotalSteps() is called.
const initialStep = [{ stepIndex: 1, isComplete: false }];

@Injectable({
  providedIn: 'root'
})
export class WizardStepsService {
  // All wizard steps (1-based stepIndex).
  steps$: BehaviorSubject<WizardStepModel[]>;
  // The step currently shown to the user.
  currentStep$: BehaviorSubject<WizardStepModel> = new BehaviorSubject<WizardStepModel>(null);

  constructor() {
    this.steps$ = new BehaviorSubject<WizardStepModel[]>(initialStep);
    this.currentStep$.next(this.steps$.value[0]);
  }

  /**
   * (Re)build the step list with `step` entries, indices starting at 1.
   *
   * Emits on the existing subject instead of replacing it — the original
   * implementation created a brand-new BehaviorSubject, which silently
   * disconnected every observable previously handed out by getSteps().
   */
  setTotalSteps(step: number) {
    const steps: WizardStepModel[] = [];
    for (let i = 1; i <= step; i++) {
      steps.push({ stepIndex: i, isComplete: false });
    }
    this.steps$.next(steps);
  }

  /** Make the given step the current one. */
  setCurrentStep(step: WizardStepModel): void {
    this.currentStep$.next(step);
  }

  getCurrentStep(): Observable<WizardStepModel> {
    return this.currentStep$.asObservable();
  }

  getSteps(): Observable<WizardStepModel[]> {
    return this.steps$.asObservable();
  }

  /** Advance to the next step; no-op when already on the last step. */
  moveToNextStep(): void {
    // stepIndex is 1-based, so it doubles as the array index of the
    // following step.
    const index = this.currentStep$.value.stepIndex;
    if (index < this.steps$.value.length) {
      this.currentStep$.next(this.steps$.value[index]);
    }
  }

  /** Go back to the previous step; no-op when already on the first step. */
  moveToPreviousStep(): void {
    const index = this.currentStep$.value.stepIndex - 1;
    if (index > 0) {
      this.currentStep$.next(this.steps$.value[index - 1]);
    }
  }

  isLastStep(): boolean {
    return this.currentStep$.value.stepIndex === this.steps$.value.length;
  }

  isFirstStep(): boolean {
    return this.currentStep$.value?.stepIndex - 1 === 0;
  }
}
| 1,590 | 25.966102 | 94 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/environments/environment.tpl.ts
|
// The file contents for the current environment will overwrite these during build.
// The build system defaults to the dev environment which uses `environment.ts`, but if you do
// `ng build --env=prod` then `environment.prod.ts` will be used instead.
// The list of which env maps to which file can be found in `.angular-cli.json`.
export const environment = {
default_lang: '{DEFAULT_LANG}',
production: '{PRODUCTION}',
year: '{COPYRIGHT_YEAR}'
};
| 458 | 40.727273 | 94 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/testing/activated-route-stub.ts
|
import { ActivatedRoute } from '@angular/router';
import { ReplaySubject } from 'rxjs';
/**
* An ActivateRoute test double with a `params` observable.
* Use the `setParams()` method to add the next `params` value.
*/
export class ActivatedRouteStub extends ActivatedRoute {
// Use a ReplaySubject to share previous values with subscribers
// and pump new values into the `params` observable
private subject = new ReplaySubject<object>();
constructor(initialParams?: object) {
super();
this.setParams(initialParams);
}
/** The mock params observable */
readonly params = this.subject.asObservable();
/** Set the params observables's next value */
setParams(params?: object) {
this.subject.next(params);
}
}
| 746 | 26.666667 | 66 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/src/testing/unit-test-helper.ts
|
import { DebugElement, Type } from '@angular/core';
import { ComponentFixture, TestBed } from '@angular/core/testing';
import { AbstractControl } from '@angular/forms';
import { By } from '@angular/platform-browser';
import { BrowserDynamicTestingModule } from '@angular/platform-browser-dynamic/testing';
import { NgbModal, NgbNav, NgbNavItem, NgbNavLink } from '@ng-bootstrap/ng-bootstrap';
import _ from 'lodash';
import { configureSuite } from '@apteco/ngth';
import { of } from 'rxjs';
import { InventoryDevice } from '~/app/ceph/cluster/inventory/inventory-devices/inventory-device.model';
import { Pool } from '~/app/ceph/pool/pool';
import { RgwDaemon } from '~/app/ceph/rgw/models/rgw-daemon';
import { OrchestratorService } from '~/app/shared/api/orchestrator.service';
import { RgwDaemonService } from '~/app/shared/api/rgw-daemon.service';
import { TableActionsComponent } from '~/app/shared/datatable/table-actions/table-actions.component';
import { Icons } from '~/app/shared/enum/icons.enum';
import { CdFormGroup } from '~/app/shared/forms/cd-form-group';
import { CdTableAction } from '~/app/shared/models/cd-table-action';
import { CdTableSelection } from '~/app/shared/models/cd-table-selection';
import { CrushNode } from '~/app/shared/models/crush-node';
import { CrushRule, CrushRuleConfig } from '~/app/shared/models/crush-rule';
import { OrchestratorFeature } from '~/app/shared/models/orchestrator.enum';
import { Permission } from '~/app/shared/models/permissions';
import {
AlertmanagerAlert,
AlertmanagerNotification,
AlertmanagerNotificationAlert,
PrometheusRule
} from '~/app/shared/models/prometheus-alerts';
export function configureTestBed(configuration: any, entryComponents?: any) {
configureSuite(() => {
if (entryComponents) {
// Declare entryComponents without having to add them to a module
// This is needed since Jest doesn't yet support not declaring entryComponents
TestBed.configureTestingModule(configuration).overrideModule(BrowserDynamicTestingModule, {
set: { entryComponents: entryComponents }
});
} else {
TestBed.configureTestingModule(configuration);
}
});
}
/**
 * Exercises a set of table actions under every combination of
 * create/update/delete permissions and records which actions are shown and
 * which one is the primary action for each selection scenario.
 */
export class PermissionHelper {
  // Component under test, rebuilt for every permission combination.
  tac: TableActionsComponent;
  permission: Permission;
  selection: { single: object; multiple: object[] };
  /**
   * @param permission The permissions used by this test.
   * @param selection The selection used by this test. Configure this if
   * the table actions require a more complex selection object to perform
   * a correct test run.
   * Defaults to `{ single: {}, multiple: [{}, {}] }`.
   */
  constructor(permission: Permission, selection?: { single: object; multiple: object[] }) {
    this.permission = permission;
    this.selection = _.defaultTo(selection, { single: {}, multiple: [{}, {}] });
  }
  /**
   * Run the given actions through all 2^3 create/update/delete permission
   * combinations. The result maps a permission string such as
   * 'create,update' (or 'no-permissions') to the visible action names and
   * the primary action per selection scenario.
   */
  setPermissionsAndGetActions(tableActions: CdTableAction[]): any {
    const result = {};
    [true, false].forEach((create) => {
      [true, false].forEach((update) => {
        [true, false].forEach((deleteP) => {
          this.permission.create = create;
          this.permission.update = update;
          this.permission.delete = deleteP;
          // Fresh component per combination so state cannot leak between runs.
          this.tac = new TableActionsComponent();
          this.tac.selection = new CdTableSelection();
          this.tac.tableActions = [...tableActions];
          this.tac.permission = this.permission;
          this.tac.ngOnInit();
          const perms = [];
          if (create) {
            perms.push('create');
          }
          if (update) {
            perms.push('update');
          }
          if (deleteP) {
            perms.push('delete');
          }
          const permissionText = perms.join(',');
          result[permissionText !== '' ? permissionText : 'no-permissions'] = {
            actions: this.tac.tableActions.map((action) => action.name),
            primary: this.testScenarios()
          };
        });
      });
    });
    return result;
  }
  /**
   * Determine the primary action name for each selection scenario
   * (multiple, executing, single, none).
   */
  testScenarios() {
    const result: any = {};
    // 'multiple selections'
    result.multiple = this.testScenario(this.selection.multiple);
    // 'select executing item'
    result.executing = this.testScenario([
      _.merge({ cdExecuting: 'someAction' }, this.selection.single)
    ]);
    // 'select non-executing item'
    result.single = this.testScenario([this.selection.single]);
    // 'no selection'
    result.no = this.testScenario([]);
    return result;
  }
  // Returns the current primary action's name, or '' if there is none.
  private testScenario(selection: object[]) {
    this.setSelection(selection);
    const action: CdTableAction = this.tac.currentAction;
    return action ? action.name : '';
  }
  /** Apply a selection to the component and let it recompute its state. */
  setSelection(selection: object[]) {
    this.tac.selection.selected = selection;
    this.tac.onSelectionChange();
  }
}
/**
 * Convenience wrapper around a CdFormGroup for changing control values and
 * asserting their validation state in unit tests.
 */
export class FormHelper {
  form: CdFormGroup;

  constructor(form: CdFormGroup) {
    this.form = form;
  }

  /**
   * Change the values of several controls at once.
   */
  setMultipleValues(values: { [controlName: string]: any }, markAsDirty?: boolean) {
    Object.entries(values).forEach(([controlName, value]) => {
      this.setValue(controlName, value, markAsDirty);
    });
  }

  /**
   * Change the value of a single control (referenced by name or instance).
   */
  setValue(control: AbstractControl | string, value: any, markAsDirty?: boolean): AbstractControl {
    const ctrl = this.getControl(control);
    if (markAsDirty) {
      ctrl.markAsDirty();
    }
    ctrl.setValue(value);
    return ctrl;
  }

  /**
   * Change the value of the control and expect it to be valid afterwards.
   */
  expectValidChange(control: AbstractControl | string, value: any, markAsDirty?: boolean) {
    this.expectValid(this.setValue(control, value, markAsDirty));
  }

  /**
   * Expect that the given control is valid.
   */
  expectValid(control: AbstractControl | string) {
    // Checking 'errors' instead of 'valid' — 'valid' is false for disabled
    // controls even though they carry no errors.
    expect(this.getControl(control).errors).toBe(null);
  }

  /**
   * Change the value of the control and expect a specific error.
   */
  expectErrorChange(
    control: AbstractControl | string,
    value: any,
    error: string,
    markAsDirty?: boolean
  ) {
    this.expectError(this.setValue(control, value, markAsDirty), error);
  }

  /**
   * Expect a specific error on the given control.
   */
  expectError(control: AbstractControl | string, error: string) {
    expect(this.getControl(control).hasError(error)).toBeTruthy();
  }

  // Resolve a control name to its instance; pass instances through as-is.
  private getControl(control: AbstractControl | string): AbstractControl {
    return typeof control === 'string' ? this.form.get(control) : control;
  }
}
/**
 * Use this to mock 'modalService.open' so the embedded component and its
 * fixture become usable in tests. It returns the modal reference with all
 * needed parts.
 *
 * Please make sure to call this function *inside* your mock and return the
 * reference at the end.
 */
export function modalServiceShow(componentClass: Type<any>, modalConfig: any) {
  // Open through the real NgbModal from the test injector and seed the
  // embedded component instance with the given initial state.
  const modalRef = TestBed.inject(NgbModal).open(componentClass);
  if (modalConfig) {
    Object.assign(modalRef.componentInstance, modalConfig);
  }
  return modalRef;
}
/**
 * Convenience helpers for querying and interacting with a component
 * fixture's DOM in unit tests. Every query triggers change detection first,
 * so assertions always run against an up-to-date view.
 */
export class FixtureHelper {
  fixture: ComponentFixture<any>;
  constructor(fixture?: ComponentFixture<any>) {
    if (fixture) {
      this.updateFixture(fixture);
    }
  }
  // Swap the fixture this helper operates on.
  updateFixture(fixture: ComponentFixture<any>) {
    this.fixture = fixture;
  }
  /**
   * Expect a list of id elements to be visible or not.
   */
  expectIdElementsVisible(ids: string[], visibility: boolean) {
    ids.forEach((css) => {
      this.expectElementVisible(`#${css}`, visibility);
    });
  }
  /**
   * Expect a specific element to be visible or not.
   */
  expectElementVisible(css: string, visibility: boolean) {
    expect(visibility).toBe(Boolean(this.getElementByCss(css)));
  }
  // Asserts the value of an input, falling back to the checked state for
  // checkboxes/radios.
  // NOTE(review): props['checked'].toString() throws if the element has
  // neither a 'value' nor a 'checked' property — verify callers only use
  // this on form fields.
  expectFormFieldToBe(css: string, value: string) {
    const props = this.getElementByCss(css).properties;
    expect(props['value'] || props['checked'].toString()).toBe(value);
  }
  expectTextToBe(css: string, value: string) {
    expect(this.getText(css)).toBe(value);
  }
  // Fire the element's click handler and re-render.
  clickElement(css: string) {
    this.getElementByCss(css).triggerEventHandler('click', null);
    this.fixture.detectChanges();
  }
  // Set a <select>/input value and dispatch 'change' so Angular picks it up.
  selectElement(css: string, value: string) {
    const nativeElement = this.getElementByCss(css).nativeElement;
    nativeElement.value = value;
    nativeElement.dispatchEvent(new Event('change'));
    this.fixture.detectChanges();
  }
  // Trimmed text content of the first match, or null if there is none.
  getText(css: string) {
    const e = this.getElementByCss(css);
    return e ? e.nativeElement.textContent.trim() : null;
  }
  // Trimmed text content of all matches.
  getTextAll(css: string) {
    const elements = this.getElementByCssAll(css);
    return elements.map((element) => {
      return element ? element.nativeElement.textContent.trim() : null;
    });
  }
  getElementByCss(css: string) {
    this.fixture.detectChanges();
    return this.fixture.debugElement.query(By.css(css));
  }
  getElementByCssAll(css: string) {
    this.fixture.detectChanges();
    return this.fixture.debugElement.queryAll(By.css(css));
  }
}
/** Builders for deterministic Prometheus/Alertmanager test fixtures. */
export class PrometheusHelper {
  /** Build a fake Alertmanager silence for the given id. */
  createSilence(id: string) {
    return {
      id,
      createdBy: `Creator of ${id}`,
      comment: `A comment for ${id}`,
      startsAt: new Date('2022-02-22T22:22:00').toISOString(),
      endsAt: new Date('2022-02-23T22:22:00').toISOString(),
      matchers: [{ name: 'job', value: 'someJob', isRegex: true }]
    };
  }

  /** Build a Prometheus rule with the given severity label and alerts. */
  createRule(name: string, severity: string, alerts: any[]): PrometheusRule {
    return { name, labels: { severity }, alerts } as PrometheusRule;
  }

  /** Build an Alertmanager alert; timeMultiplier scales its start time. */
  createAlert(name: string, state = 'active', timeMultiplier = 1): AlertmanagerAlert {
    const startsAt = new Date(new Date('2022-02-22').getTime() * timeMultiplier).toString();
    return {
      fingerprint: name,
      status: { state },
      labels: {
        alertname: name,
        instance: 'someInstance',
        job: 'someJob',
        severity: 'someSeverity'
      },
      annotations: {
        description: `${name} is ${state}`
      },
      generatorURL: `http://${name}`,
      startsAt
    } as AlertmanagerAlert;
  }

  /** Build a single alert as embedded in an Alertmanager notification. */
  createNotificationAlert(name: string, status = 'firing'): AlertmanagerNotificationAlert {
    return {
      status,
      labels: {
        alertname: name
      },
      annotations: {
        description: `${name} is ${status}`
      },
      generatorURL: `http://${name}`
    } as AlertmanagerNotificationAlert;
  }

  /** Build a notification carrying `alertNumber` alerts named alert0..N. */
  createNotification(alertNumber = 1, status = 'firing'): AlertmanagerNotification {
    const alerts = Array.from({ length: alertNumber }, (_unused, i) =>
      this.createNotificationAlert('alert' + i, status)
    );
    return { alerts, status } as AlertmanagerNotification;
  }

  /** Build the external-link HTML snippet used for alert sources. */
  createLink(url: string) {
    return `<a href="${url}" target="_blank"><i class="${Icons.lineChart}"></i></a>`;
  }
}
export function expectItemTasks(item: any, executing: string, percentage?: number) {
if (executing) {
executing = executing + '...';
if (percentage) {
executing = `${executing} ${percentage}%`;
}
}
expect(item.cdExecuting).toBe(executing);
}
/**
 * Shared validation scenarios for iSCSI CHAP user/password form fields.
 */
export class IscsiHelper {
  // Drives the username control through representative inputs: too short,
  // valid, invalid characters and an over-long value must (not) raise the
  // 'pattern' error.
  static validateUser(formHelper: FormHelper, fieldName: string) {
    formHelper.expectErrorChange(fieldName, 'short', 'pattern');
    formHelper.expectValidChange(fieldName, 'thisIsCorrect');
    formHelper.expectErrorChange(fieldName, '##?badChars?##', 'pattern');
    formHelper.expectErrorChange(
      fieldName,
      'thisUsernameIsWayyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyTooBig',
      'pattern'
    );
  }
  // Same idea for the password control (shorter length limit).
  static validatePassword(formHelper: FormHelper, fieldName: string) {
    formHelper.expectErrorChange(fieldName, 'short', 'pattern');
    formHelper.expectValidChange(fieldName, 'thisIsCorrect');
    formHelper.expectErrorChange(fieldName, '##?badChars?##', 'pattern');
    formHelper.expectErrorChange(fieldName, 'thisPasswordIsWayTooBig', 'pattern');
  }
}
/** Fixtures and helpers for RGW daemon related tests. */
export class RgwHelper {
  // NOTE: static fields initialize in declaration order, so 'daemons' must
  // stay declared before DAEMON_QUERY_PARAM, which reads daemons[0].
  static readonly daemons = RgwHelper.getDaemonList();
  static readonly DAEMON_QUERY_PARAM = `daemon_name=${RgwHelper.daemons[0].id}`;
  /**
   * Build three fake daemons (daemon1..daemon3); daemon2 is flagged as the
   * default one.
   */
  static getDaemonList() {
    const daemonList: RgwDaemon[] = [];
    for (let daemonIndex = 1; daemonIndex <= 3; daemonIndex++) {
      const rgwDaemon = new RgwDaemon();
      rgwDaemon.id = `daemon${daemonIndex}`;
      rgwDaemon.default = daemonIndex === 2;
      rgwDaemon.zonegroup_name = `zonegroup${daemonIndex}`;
      daemonList.push(rgwDaemon);
    }
    return daemonList;
  }
  /** Select the first fixture daemon on the injected RgwDaemonService. */
  static selectDaemon() {
    const service = TestBed.inject(RgwDaemonService);
    service.selectDaemon(this.daemons[0]);
  }
}
/** Static factories for commonly mocked dashboard model objects. */
export class Mocks {
  // Build a single CRUSH map node (bucket or OSD).
  static getCrushNode(
    name: string,
    id: number,
    type: string,
    type_id: number,
    children?: number[],
    device_class?: string
  ): CrushNode {
    return { name, type, type_id, id, children, device_class };
  }
  // Build a replicated pool with fixed PG numbers and size 3.
  static getPool = (name: string, id: number): Pool => {
    return _.merge(new Pool(name), {
      pool: id,
      type: 'replicated',
      pg_num: 256,
      pg_placement_num: 256,
      pg_num_target: 256,
      pg_placement_num_target: 256,
      size: 3
    });
  };
  /**
   * Create the following test crush map:
   * > default
   * --> ssd-host
   * ----> 3x osd with ssd
   * --> mix-host
   * ----> hdd-rack
   * ------> 2x osd-rack with hdd
   * ----> ssd-rack
   * ------> 2x osd-rack with ssd
   */
  static getCrushMap(): CrushNode[] {
    return [
      // Root node
      this.getCrushNode('default', -1, 'root', 11, [-2, -3]),
      // SSD host
      this.getCrushNode('ssd-host', -2, 'host', 1, [1, 0, 2]),
      this.getCrushNode('osd.0', 0, 'osd', 0, undefined, 'ssd'),
      this.getCrushNode('osd.1', 1, 'osd', 0, undefined, 'ssd'),
      this.getCrushNode('osd.2', 2, 'osd', 0, undefined, 'ssd'),
      // SSD and HDD mixed devices host
      this.getCrushNode('mix-host', -3, 'host', 1, [-4, -5]),
      // HDD rack
      this.getCrushNode('hdd-rack', -4, 'rack', 3, [3, 4]),
      this.getCrushNode('osd2.0', 3, 'osd-rack', 0, undefined, 'hdd'),
      this.getCrushNode('osd2.1', 4, 'osd-rack', 0, undefined, 'hdd'),
      // SSD rack
      this.getCrushNode('ssd-rack', -5, 'rack', 3, [5, 6]),
      this.getCrushNode('osd3.0', 5, 'osd-rack', 0, undefined, 'ssd'),
      this.getCrushNode('osd3.1', 6, 'osd-rack', 0, undefined, 'ssd')
    ];
  }
  /**
   * Generates a simple crush map with multiple hosts that have OSDs with either ssd or hdd OSDs.
   * Hosts with zero or even numbers at the end have SSD OSDs the other hosts have hdd OSDs.
   *
   * Host names follow the following naming convention:
   * host.$index
   * $index represents a number count started at 0 (like an index within an array) (same for OSDs)
   *
   * OSD names follow the following naming convention:
   * osd.$hostIndex.$osdIndex
   *
   * The following crush map will be generated with the set defaults:
   * > default
   * --> host.0 (has only ssd OSDs)
   * ----> osd.0.0
   * ----> osd.0.1
   * ----> osd.0.2
   * ----> osd.0.3
   * --> host.1 (has only hdd OSDs)
   * ----> osd.1.0
   * ----> osd.1.1
   * ----> osd.1.2
   * ----> osd.1.3
   */
  static generateSimpleCrushMap(hosts: number = 2, osds: number = 4): CrushNode[] {
    const nodes = [];
    const createOsdLeafs = (hostSuffix: number): number[] => {
      let osdId = 0;
      const osdIds = [];
      // Offset that keeps OSD ids globally unique across hosts.
      const osdsInUse = hostSuffix * osds;
      for (let o = 0; o < osds; o++) {
        osdIds.push(osdId);
        nodes.push(
          this.getCrushNode(
            `osd.${hostSuffix}.${osdId}`,
            osdId + osdsInUse,
            'osd',
            0,
            undefined,
            hostSuffix % 2 === 0 ? 'ssd' : 'hdd'
          )
        );
        osdId++;
      }
      return osdIds;
    };
    const createHostBuckets = (): number[] => {
      // Bucket ids are negative and count down from -2; hostSuffix maps
      // them back to 0, 1, 2, ... for the display names.
      let hostId = -2;
      const hostIds = [];
      for (let h = 0; h < hosts; h++) {
        const hostSuffix = hostId * -1 - 2;
        hostIds.push(hostId);
        nodes.push(
          this.getCrushNode(`host.${hostSuffix}`, hostId, 'host', 1, createOsdLeafs(hostSuffix))
        );
        hostId--;
      }
      return hostIds;
    };
    nodes.push(this.getCrushNode('default', -1, 'root', 11, createHostBuckets()));
    return nodes;
  }
  // Build a CRUSH rule configuration as submitted by the create form.
  static getCrushRuleConfig(
    name: string,
    root: string,
    failure_domain: string,
    device_class?: string
  ): CrushRuleConfig {
    return {
      name,
      root,
      failure_domain,
      device_class
    };
  }
  // Build a CRUSH rule with take -> choose_firstn -> emit steps;
  // type 'erasure' maps to rule type 3, everything else to 1.
  static getCrushRule({
    id = 0,
    name = 'somePoolName',
    type = 'replicated',
    failureDomain = 'osd',
    itemName = 'default' // This string also sets the device type - "default~ssd" <- ssd usage only
  }: {
    id?: number;
    name?: string;
    type?: string;
    failureDomain?: string;
    itemName?: string;
  }): CrushRule {
    const rule = new CrushRule();
    rule.type = type === 'erasure' ? 3 : 1;
    rule.rule_id = id;
    rule.rule_name = name;
    rule.steps = [
      {
        item_name: itemName,
        item: -1,
        op: 'take'
      },
      {
        num: 0,
        type: failureDomain,
        op: 'choose_firstn'
      },
      {
        op: 'emit'
      }
    ];
    return rule;
  }
  // Build an inventory device with fixed vendor/model/size metadata.
  static getInventoryDevice(
    hostname: string,
    uid: string,
    path = 'sda',
    available = false
  ): InventoryDevice {
    return {
      hostname,
      uid,
      path,
      available,
      sys_api: {
        vendor: 'AAA',
        model: 'aaa',
        size: 1024,
        rotational: 'false',
        human_readable_size: '1 KB'
      },
      rejected_reasons: [''],
      device_id: 'AAA-aaa-id0',
      human_readable_type: 'nvme/ssd',
      osd_ids: []
    };
  }
}
export class TabHelper {
  /** Returns the NgbNav instance rendered inside the given fixture. */
  static getNgbNav(fixture: ComponentFixture<any>) {
    const navDebugElem = fixture.debugElement.query(By.directive(NgbNav));
    return navDebugElem.injector.get(NgbNav);
  }

  /** Returns the NgbNavItem directive instance of every rendered nav link. */
  static getNgbNavItems(fixture: ComponentFixture<any>) {
    return this.getNgbNavItemsDebugElems(fixture).map((elem) => elem.injector.get(NgbNavItem));
  }

  /** Returns the text content of every rendered nav link. */
  static getTextContents(fixture: ComponentFixture<any>) {
    return this.getNgbNavItemsDebugElems(fixture).map((elem) => elem.nativeElement.textContent);
  }

  /** Collects the debug elements of all NgbNavLink directives. */
  private static getNgbNavItemsDebugElems(fixture: ComponentFixture<any>) {
    const rootDebugElem: DebugElement = fixture.debugElement;
    return rootDebugElem.queryAll(By.directive(NgbNavLink));
  }
}
export class OrchestratorHelper {
  /**
   * Mock Orchestrator status.
   * @param available is the Orchestrator enabled?
   * @param features A list of enabled Orchestrator features.
   */
  static mockStatus(available: boolean, features?: OrchestratorFeature[]) {
    const featureMap: { [feature: string]: { available: boolean } } = {};
    if (features) {
      for (const feature of features) {
        featureMap[feature] = { available: true };
      }
    }
    const orchStatus = { available: available, description: '', features: featureMap };
    spyOn(TestBed.inject(OrchestratorService), 'status').and.callFake(() => of(orchStatus));
  }
}
export class TableActionHelper {
  /**
   * Verify table action buttons, including the button disabled state and disable description.
   *
   * @param fixture test fixture
   * @param tableActions table actions
   * @param expectResult expected values. e.g. {Create: { disabled: true, disableDesc: 'not supported'}}.
   * Expect the Create button to be disabled with 'not supported' tooltip.
   */
  static verifyTableActions = async (
    fixture: ComponentFixture<any>,
    tableActions: CdTableAction[],
    expectResult: {
      [action: string]: { disabled: boolean; disableDesc: string };
    }
  ) => {
    // click dropdown to update all actions buttons
    const dropDownToggle = fixture.debugElement.query(By.css('.dropdown-toggle'));
    dropDownToggle.triggerEventHandler('click', null);
    fixture.detectChanges();
    await fixture.whenStable();
    const tableActionElement = fixture.debugElement.query(By.directive(TableActionsComponent));
    // toClassName converts an action name to the CSS class the component
    // assigns to the corresponding dropdown item.
    const toClassName = TestBed.inject(TableActionsComponent).toClassName;
    const getActionElement = (action: CdTableAction) =>
      tableActionElement.query(By.css(`[ngbDropdownItem].${toClassName(action)}`));
    // Collect the observed state of every action the caller has
    // expectations for, then compare in a single assertion.
    const actions = {};
    tableActions.forEach((action) => {
      const actionElement = getActionElement(action);
      if (expectResult[action.name]) {
        actions[action.name] = {
          disabled: actionElement.classes.disabled ? true : false,
          disableDesc: actionElement.properties.title
        };
      }
    });
    expect(actions).toEqual(expectResult);
  };
}
| 20,814 | 29.342566 | 105 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/model/__init__.py
|
# -*- coding: utf-8 -*-
| 24 | 11.5 | 23 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/plugins/__init__.py
|
# -*- coding: utf-8 -*-
import abc
from .pluggy import HookimplMarker, HookspecMarker, PluginManager
class Interface(object, metaclass=abc.ABCMeta):
    # Marker base class: subclasses declare hook specifications and are
    # registered via DashboardPluginManager.add_interface().
    pass


class Mixin(object):
    # Marker base class for plain mixins that carry no hook specs.
    pass


class DashboardPluginManager(object):
    """Thin facade around the (minimal) pluggy PluginManager used by the
    dashboard plugin system."""

    def __init__(self, project_name):
        # Wrapped pluggy manager plus the project-scoped markers used to
        # declare hook specs and hook implementations.
        self.__pm = PluginManager(project_name)
        self.__add_spec = HookspecMarker(project_name)
        # Spec marker variant that additionally makes the method abstract.
        self.__add_abcspec = lambda *args, **kwargs: abc.abstractmethod(
            self.__add_spec(*args, **kwargs))
        self.__add_hook = HookimplMarker(project_name)

    # Read-only accessors for the wrapped manager and the markers.
    pm = property(lambda self: self.__pm)
    hook = property(lambda self: self.pm.hook)
    add_spec = property(lambda self: self.__add_spec)
    add_abcspec = property(lambda self: self.__add_abcspec)
    add_hook = property(lambda self: self.__add_hook)

    def add_interface(self, cls):
        # Registers an Interface subclass as a hook specification.
        assert issubclass(cls, Interface)
        self.pm.add_hookspecs(cls)
        return cls

    @staticmethod
    def final(func):
        # Marks a method as final: add_plugin() will not require a hook
        # implementation for it on the plugin class.
        setattr(func, '__final__', True)
        return func

    def add_plugin(self, plugin):
        """ Provides decorator interface for PluginManager.register():
                @PLUGIN_MANAGER.add_plugin
                class Plugin(...):
                    ...
            Additionally it checks whether the Plugin instance has all Interface
            methods implemented and marked with add_hook decorator.
            As a con of this approach, plugins cannot call super() from __init__()
        """
        assert issubclass(plugin, Interface)
        from inspect import getmembers, ismethod
        for interface in plugin.__bases__:
            for method_name, _ in getmembers(interface, predicate=ismethod):
                # Methods marked with final() are exempt from the check.
                if hasattr(getattr(interface, method_name), '__final__'):
                    continue
                if self.pm.parse_hookimpl_opts(plugin, method_name) is None:
                    raise NotImplementedError(
                        "Plugin '{}' implements interface '{}' but existing"
                        " method '{}' is not declared added as hook".format(
                            plugin.__name__,
                            interface.__name__,
                            method_name))
        # Note: the plugin class is instantiated here with no arguments.
        self.pm.register(plugin())
        return plugin


# Singleton plugin manager shared by all dashboard plugin modules.
PLUGIN_MANAGER = DashboardPluginManager("ceph-mgr.dashboard")
# Load all interfaces and their hooks
from . import interfaces # noqa pylint: disable=C0413,W0406
| 2,420 | 32.625 | 78 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/plugins/debug.py
|
# -*- coding: utf-8 -*-
import json
from enum import Enum
from typing import no_type_check
from . import PLUGIN_MANAGER as PM
from . import interfaces as I # noqa: E741,N812
from .plugin import SimplePlugin as SP
class Actions(Enum):
    # Sub-commands accepted by the 'dashboard debug' CLI command.
    ENABLE = 'enable'
    DISABLE = 'disable'
    STATUS = 'status'


@PM.add_plugin  # pylint: disable=too-many-ancestors
class Debug(SP, I.CanCherrypy, I.ConfiguresCherryPy,  # pylint: disable=too-many-ancestors
            I.Setupable, I.ConfigNotify):
    """Plugin that toggles the dashboard debug mode.

    While enabled it raises a DASHBOARD_DEBUG health warning, switches the
    cherrypy environment to 'test_suite' and includes traceback/version
    details in JSON error pages.
    """
    NAME = 'debug'

    # Single boolean module option backing the debug state.
    OPTIONS = [
        SP.Option(
            name=NAME,
            default=False,
            type='bool',
            desc="Enable/disable debug options"
        )
    ]

    @no_type_check  # https://github.com/python/mypy/issues/7806
    def _refresh_health_checks(self):
        # Raises or clears the DASHBOARD_DEBUG health warning depending
        # on the current option value.
        debug = self.get_option(self.NAME)
        if debug:
            self.mgr.health_checks.update({'DASHBOARD_DEBUG': {
                'severity': 'warning',
                'summary': 'Dashboard debug mode is enabled',
                'detail': [
                    'Please disable debug mode in production environments using '
                    '"ceph dashboard {} {}"'.format(self.NAME, Actions.DISABLE.value)
                ]
            }})
        else:
            self.mgr.health_checks.pop('DASHBOARD_DEBUG', None)
        self.mgr.refresh_health_checks()

    @PM.add_hook
    def setup(self):
        # Ensure the health check reflects the persisted option at startup.
        self._refresh_health_checks()

    @no_type_check
    def handler(self, action: Actions):
        '''
        Control and report debug status in Ceph-Dashboard
        '''
        ret = 0
        msg = ''
        if action in [Actions.ENABLE, Actions.DISABLE]:
            self.set_option(self.NAME, action == Actions.ENABLE)
            # Re-applies the cherrypy config so configure_cherrypy() below
            # picks up the new environment/error page settings.
            self.mgr.update_cherrypy_config({})
            self._refresh_health_checks()
        else:
            # STATUS: report the current state without changing it.
            debug = self.get_option(self.NAME)
            msg = "Debug: '{}'".format('enabled' if debug else 'disabled')
        return ret, msg, None

    COMMANDS = [
        SP.Command(
            prefix="dashboard {name}".format(name=NAME),
            handler=handler
        )
    ]

    def custom_error_response(self, status, message, traceback, version):
        # JSON error page; traceback/version are only exposed in debug mode.
        self.response.headers['Content-Type'] = 'application/json'
        error_response = dict(status=status, detail=message, request_id=str(self.request.unique_id))
        if self.get_option(self.NAME):
            error_response.update(dict(traceback=traceback, version=version))
        return json.dumps(error_response)

    @PM.add_hook
    def configure_cherrypy(self, config):
        config.update({
            'environment': 'test_suite' if self.get_option(self.NAME) else 'production',
            'error_page.default': self.custom_error_response,
        })

    @PM.add_hook
    def config_notify(self):
        # Option changed (possibly out-of-band): re-evaluate health check.
        self._refresh_health_checks()
py
|
null |
ceph-main/src/pybind/mgr/dashboard/plugins/feature_toggles.py
|
# -*- coding: utf-8 -*-
from enum import Enum
from typing import List, Optional, Set, no_type_check
import cherrypy
from mgr_module import CLICommand, Option
from ..controllers.cephfs import CephFS
from ..controllers.iscsi import Iscsi, IscsiTarget
from ..controllers.nfs import NFSGaneshaExports, NFSGaneshaUi
from ..controllers.rbd import Rbd, RbdSnapshot, RbdTrash
from ..controllers.rbd_mirroring import RbdMirroringPoolMode, \
RbdMirroringPoolPeer, RbdMirroringSummary
from ..controllers.rgw import Rgw, RgwBucket, RgwDaemon, RgwUser
from . import PLUGIN_MANAGER as PM
from . import interfaces as I # noqa: E741,N812
from .ttl_cache import ttl_cache
class Features(Enum):
    # Toggleable dashboard feature areas; the value is the name used on
    # the CLI and in the REST API.
    RBD = 'rbd'
    MIRRORING = 'mirroring'
    ISCSI = 'iscsi'
    CEPHFS = 'cephfs'
    RGW = 'rgw'
    NFS = 'nfs'
    DASHBOARD = 'dashboard'


# Features whose module option defaults to disabled (none by default).
PREDISABLED_FEATURES = set()  # type: Set[str]

# Maps each feature to the controllers that are rejected with a 404 while
# the feature is disabled (see FeatureToggles.filter_request_before_handler).
Feature2Controller = {
    Features.RBD: [Rbd, RbdSnapshot, RbdTrash],
    Features.MIRRORING: [
        RbdMirroringSummary, RbdMirroringPoolMode, RbdMirroringPoolPeer],
    Features.ISCSI: [Iscsi, IscsiTarget],
    Features.CEPHFS: [CephFS],
    Features.RGW: [Rgw, RgwDaemon, RgwBucket, RgwUser],
    Features.NFS: [NFSGaneshaUi, NFSGaneshaExports],
}


class Actions(Enum):
    # Sub-commands accepted by the 'dashboard feature' CLI command.
    ENABLE = 'enable'
    DISABLE = 'disable'
    STATUS = 'status'
# pylint: disable=too-many-ancestors
@PM.add_plugin
class FeatureToggles(I.CanMgr, I.Setupable, I.HasOptions,
                     I.HasCommands, I.FilterRequest.BeforeHandler,
                     I.HasControllers):
    """Plugin that enables/disables dashboard feature areas via module
    options, a CLI command and a REST endpoint, and blocks requests to
    controllers of disabled features."""

    # Format string producing the module option name for a Features member.
    OPTION_FMT = 'FEATURE_TOGGLE_{.name}'
    CACHE_MAX_SIZE = 128  # Optimum performance with 2^N sizes
    CACHE_TTL = 10  # seconds

    @PM.add_hook
    def setup(self):
        # Build the reverse lookup (controller class -> feature) used by
        # _get_feature_from_request().
        # pylint: disable=attribute-defined-outside-init
        self.Controller2Feature = {
            controller: feature
            for feature, controllers in Feature2Controller.items()
            for controller in controllers}  # type: ignore

    @PM.add_hook
    def get_options(self):
        # One boolean module option per feature; default is enabled unless
        # the feature is listed in PREDISABLED_FEATURES.
        return [Option(
            name=self.OPTION_FMT.format(feature),
            default=(feature not in PREDISABLED_FEATURES),
            type='bool',) for feature in Features]

    @PM.add_hook
    def register_commands(self):
        @CLICommand("dashboard feature")
        def cmd(mgr,
                action: Actions = Actions.STATUS,
                features: Optional[List[Features]] = None):
            '''
            Enable or disable features in Ceph-Mgr Dashboard
            '''
            ret = 0
            msg = []
            if action in [Actions.ENABLE, Actions.DISABLE]:
                if features is None:
                    ret = 1
                    msg = ["At least one feature must be specified"]
                else:
                    for feature in features:
                        mgr.set_module_option(
                            self.OPTION_FMT.format(feature),
                            action == Actions.ENABLE)
                        msg += ["Feature '{.value}': {}".format(
                            feature,
                            'enabled' if action == Actions.ENABLE else
                            'disabled')]
            else:
                # STATUS: report the given features, or all of them.
                for feature in features or list(Features):
                    enabled = mgr.get_module_option(self.OPTION_FMT.format(feature))
                    msg += ["Feature '{.value}': {}".format(
                        feature,
                        'enabled' if enabled else 'disabled')]
            return ret, '\n'.join(msg), ''
        return {'handle_command': cmd}

    @no_type_check  # https://github.com/python/mypy/issues/7806
    def _get_feature_from_request(self, request):
        # Resolve the controller instance handling the request back to its
        # feature; None if the controller is not feature-gated.
        try:
            return self.Controller2Feature[
                request.handler.callable.__self__]
        except (AttributeError, KeyError):
            return None

    @ttl_cache(ttl=CACHE_TTL, maxsize=CACHE_MAX_SIZE)
    @no_type_check  # https://github.com/python/mypy/issues/7806
    def _is_feature_enabled(self, feature):
        # Cached for CACHE_TTL seconds to avoid an option lookup per request.
        return self.mgr.get_module_option(self.OPTION_FMT.format(feature))

    @PM.add_hook
    def filter_request_before_handler(self, request):
        # Reject requests to controllers whose feature is disabled.
        feature = self._get_feature_from_request(request)
        if feature is None:
            return
        if not self._is_feature_enabled(feature):
            raise cherrypy.HTTPError(
                404, "Feature='{}' disabled by option '{}'".format(
                    feature.value,
                    self.OPTION_FMT.format(feature),
                )
            )

    @PM.add_hook
    def get_controllers(self):
        from ..controllers import APIDoc, APIRouter, EndpointDoc, RESTController

        FEATURES_SCHEMA = {
            "rbd": (bool, ''),
            "mirroring": (bool, ''),
            "iscsi": (bool, ''),
            "cephfs": (bool, ''),
            "rgw": (bool, ''),
            "nfs": (bool, ''),
            "dashboard": (bool, '')
        }

        @APIRouter('/feature_toggles')
        @APIDoc("Manage Features API", "FeatureTogglesEndpoint")
        class FeatureTogglesEndpoint(RESTController):
            @EndpointDoc("Get List Of Features",
                         responses={200: FEATURES_SCHEMA})
            def list(_):  # pylint: disable=no-self-argument  # noqa: N805
                # Returns {feature name: enabled} for every feature.
                return {
                    # pylint: disable=protected-access
                    feature.value: self._is_feature_enabled(feature)
                    for feature in Features
                }
        return [FeatureTogglesEndpoint]
| 5,576 | 33.85625 | 84 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/plugins/interfaces.py
|
# -*- coding: utf-8 -*-
from . import PLUGIN_MANAGER as PM # pylint: disable=cyclic-import
from . import Interface, Mixin
class CanMgr(Mixin):
    # Gives plugins access to the module-level mgr handle via self.mgr.
    from .. import mgr
    mgr = mgr  # type: ignore


class CanCherrypy(Mixin):
    # Gives plugins access to the current cherrypy request/response objects.
    import cherrypy
    request = cherrypy.request
    response = cherrypy.response


@PM.add_interface
class Initializable(Interface):
    @PM.add_abcspec
    def init(self):
        """
        Placeholder for module scope initialization
        """


@PM.add_interface
class Setupable(Interface):
    @PM.add_abcspec
    def setup(self):
        """
        Placeholder for plugin setup, right after server start.
        CanMgr.mgr is initialized by then.
        """


@PM.add_interface
class HasOptions(Interface):
    @PM.add_abcspec
    def get_options(self):
        # Hook: return the list of module options contributed by the plugin.
        pass


@PM.add_interface
class HasCommands(Interface):
    @PM.add_abcspec
    def register_commands(self):
        # Hook: register the plugin's CLI commands.
        pass


@PM.add_interface
class HasControllers(Interface):
    @PM.add_abcspec
    def get_controllers(self):
        # Hook: return the list of controller classes to mount.
        pass


@PM.add_interface
class ConfiguresCherryPy(Interface):
    @PM.add_abcspec
    def configure_cherrypy(self, config):
        # Hook: mutate the cherrypy config dict before it is applied.
        pass


class FilterRequest(object):
    # Namespace grouping request-filtering interfaces.
    @PM.add_interface
    class BeforeHandler(Interface):
        @PM.add_abcspec
        def filter_request_before_handler(self, request):
            # Hook: called before the request handler runs; may raise to
            # reject the request (see the feature_toggles plugin).
            pass


@PM.add_interface
class ConfigNotify(Interface):
    @PM.add_abcspec
    def config_notify(self):
        """
        This method is called whenever a option of this mgr module has
        been modified.
        """
py
|
null |
ceph-main/src/pybind/mgr/dashboard/plugins/lru_cache.py
|
# -*- coding: utf-8 -*-
"""
This is a minimal implementation of lru_cache function.
Based on Python 3 functools and backports.functools_lru_cache.
"""
from collections import OrderedDict
from functools import wraps
from threading import RLock
def lru_cache(maxsize=128, typed=False):
    """Minimal drop-in stand-in for ``functools.lru_cache``.

    Only the ``maxsize`` option is supported; passing ``typed=True``
    raises ``NotImplementedError``. The decorated function gains a
    ``cache_info()`` attribute returning a statistics string.
    """
    if typed is not False:
        raise NotImplementedError("typed caching not supported")

    def decorating_function(function):
        cache = OrderedDict()
        stats = [0, 0]  # [hits, misses]
        rlock = RLock()

        # Attached to the undecorated function first; @wraps below copies
        # it onto the wrapper via __dict__.
        setattr(function, 'cache_info', lambda:
                "hits={}, misses={}, maxsize={}, currsize={}".format(
                    stats[0], stats[1], maxsize, len(cache)))

        @wraps(function)
        def wrapper(*args, **kwargs):
            key = args + tuple(kwargs.items())
            with rlock:
                if key in cache:
                    # Hit: re-insert to move the entry to the MRU end.
                    result = cache.pop(key)
                    cache[key] = result
                    stats[0] += 1
                    return result
                # Miss: compute, evict LRU entry if full, then store.
                result = function(*args, **kwargs)
                if len(cache) == maxsize:
                    cache.popitem(last=False)
                cache[key] = result
                stats[1] += 1
                return result

        return wrapper

    return decorating_function
| 1,307 | 28.727273 | 69 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/plugins/motd.py
|
# -*- coding: utf-8 -*-
import hashlib
import json
from enum import Enum
from typing import Dict, NamedTuple, Optional
from ceph.utils import datetime_now, datetime_to_str, parse_timedelta, str_to_datetime
from mgr_module import CLICommand
from . import PLUGIN_MANAGER as PM
from .plugin import SimplePlugin as SP
class MotdSeverity(Enum):
    # Display severity of the message of the day.
    INFO = 'info'
    WARNING = 'warning'
    DANGER = 'danger'


class MotdData(NamedTuple):
    # Message of the day record as stored (JSON-encoded) in the module option.
    message: str
    md5: str  # The MD5 of the message.
    severity: MotdSeverity
    expires: str  # The expiration date in ISO 8601. Does not expire if empty.
@PM.add_plugin  # pylint: disable=too-many-ancestors
class Motd(SP):
    """Message-of-the-day plugin.

    Stores the MOTD as a JSON blob in a module option and exposes CLI
    commands (get/set/clear) plus a UI REST endpoint to read it.
    """
    NAME = 'motd'

    OPTIONS = [
        SP.Option(
            name=NAME,
            default='',
            type='str',
            desc='The message of the day'
        )
    ]

    @PM.add_hook
    def register_commands(self):
        @CLICommand("dashboard {name} get".format(name=self.NAME))
        def _get(_):
            # Report the stored MOTD in human-readable form.
            stdout: str
            value: str = self.get_option(self.NAME)
            if not value:
                stdout = 'No message of the day has been set.'
            else:
                data = json.loads(value)
                # An empty 'expires' field means the MOTD never expires.
                if not data['expires']:
                    data['expires'] = "Never"
                stdout = 'Message="{message}", severity="{severity}", ' \
                         'expires="{expires}"'.format(**data)
            return 0, stdout, ''

        @CLICommand("dashboard {name} set".format(name=self.NAME))
        def _set(_, severity: MotdSeverity, expires: str, message: str):
            # 'expires' is a duration string like "2h"; '0' means never.
            if expires != '0':
                delta = parse_timedelta(expires)
                if not delta:
                    return 1, '', 'Invalid expires format, use "2h", "10d" or "30s"'
                expires = datetime_to_str(datetime_now() + delta)
            else:
                expires = ''
            # Persist as JSON together with an MD5 of the message text.
            value: str = json.dumps({
                'message': message,
                'md5': hashlib.md5(message.encode()).hexdigest(),
                'severity': severity.value,
                'expires': expires
            })
            self.set_option(self.NAME, value)
            return 0, 'Message of the day has been set.', ''

        @CLICommand("dashboard {name} clear".format(name=self.NAME))
        def _clear(_):
            # Clearing simply resets the option to the empty string.
            self.set_option(self.NAME, '')
            return 0, 'Message of the day has been cleared.', ''

    @PM.add_hook
    def get_controllers(self):
        from ..controllers import RESTController, UIRouter

        @UIRouter('/motd')
        class MessageOfTheDay(RESTController):
            def list(_) -> Optional[Dict]:  # pylint: disable=no-self-argument
                # Returns the MOTD as a dict, or None if unset/expired.
                value: str = self.get_option(self.NAME)
                if not value:
                    return None
                data: MotdData = MotdData(**json.loads(value))
                # Check if the MOTD has been expired.
                if data.expires:
                    expires = str_to_datetime(data.expires)
                    if expires < datetime_now():
                        return None
                return data._asdict()
        return [MessageOfTheDay]
py
|
null |
ceph-main/src/pybind/mgr/dashboard/plugins/pluggy.py
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015 holger krekel (rather uses bitbucket/hpk42)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""\
"""
CAVEAT:
This is a minimal implementation of python-pluggy (based on 0.8.0 interface:
https://github.com/pytest-dev/pluggy/releases/tag/0.8.0).
Despite being a widely available Python library, it does not reach all the
distros and releases currently targeted for Ceph Nautilus:
- CentOS/RHEL 7.5 [ ]
- CentOS/RHEL 8 [ ]
- Debian 8.0 [ ]
- Debian 9.0 [ ]
- Ubuntu 14.05 [ ]
- Ubuntu 16.04 [X]
TODO: Once this becomes available in the above distros, this file should be
REMOVED, and the fully featured python-pluggy should be used instead.
"""
try:
from typing import DefaultDict
except ImportError:
pass # For typing only
class HookspecMarker(object):
    """ Dummy implementation. No spec validation. """

    def __init__(self, project_name):
        self.project_name = project_name

    def __call__(self, function, *args, **kwargs):
        """Mark ``function`` as a hook specification. No options supported.

        Fix: reject *any* extra argument. The previous ``any(args)`` test
        silently accepted falsy positional options (e.g. ``None``/``False``)
        even though options are not implemented here.
        """
        if args or kwargs:
            raise NotImplementedError(
                "This is a minimal implementation of pluggy")
        return function
class HookimplMarker(object):
    """Marks a function as a hook implementation by attaching an
    '<project_name>_impl' attribute (read by PluginManager)."""

    def __init__(self, project_name):
        self.project_name = project_name

    def __call__(self, function, *args, **kwargs):
        """ No options supported.

        Fix: reject *any* extra argument. The previous ``any(args)`` test
        silently accepted falsy positional options (e.g. ``None``/``False``)
        even though options are not implemented here.
        """
        if args or kwargs:
            raise NotImplementedError(
                "This is a minimal implementation of pluggy")
        setattr(function, self.project_name + "_impl", {})
        return function
class _HookRelay(object):
"""
Provides the PluginManager.hook.<method_name>() syntax and
functionality.
"""
def __init__(self):
from collections import defaultdict
self._registry = defaultdict(list) # type: DefaultDict[str, list]
def __getattr__(self, hook_name):
return lambda *args, **kwargs: [
hook(*args, **kwargs) for hook in self._registry[hook_name]]
def _add_hookimpl(self, hook_name, hook_method):
self._registry[hook_name].append(hook_method)
class PluginManager(object):
    """Minimal plugin registry: finds methods marked by HookimplMarker on
    registered plugins and wires them into a _HookRelay."""

    def __init__(self, project_name):
        self.project_name = project_name
        self.__hook = _HookRelay()

    @property
    def hook(self):
        return self.__hook

    def parse_hookimpl_opts(self, plugin, name):
        # Returns the marker options dict, or None when the attribute was
        # not marked as a hook implementation.
        method = getattr(plugin, name)
        return getattr(method, self.project_name + "_impl", None)

    def add_hookspecs(self, module_or_class):
        """ Dummy method"""

    def register(self, plugin, name=None):  # pylint: disable=unused-argument
        for attr_name in dir(plugin):
            if self.parse_hookimpl_opts(plugin, attr_name) is None:
                continue
            # pylint: disable=protected-access
            self.hook._add_hookimpl(attr_name, getattr(plugin, attr_name))
py
|
null |
ceph-main/src/pybind/mgr/dashboard/plugins/plugin.py
|
from typing import no_type_check
from mgr_module import Command, Option
from . import PLUGIN_MANAGER as PM
from . import interfaces as I # noqa: E741,N812
class SimplePlugin(I.CanMgr, I.HasOptions, I.HasCommands):
    """
    Helper class that provides simplified creation of plugins:
    - Default Mixins/Interfaces: CanMgr, HasOptions & HasCommands
    - Options are defined by OPTIONS class variable, instead from get_options hook
    - Commands are created with by COMMANDS list of Commands() and handlers
      (less compact than CLICommand, but allows using method instances)
    """
    # Re-exported so subclasses can declare OPTIONS/COMMANDS without
    # importing mgr_module themselves.
    Option = Option
    Command = Command

    @PM.add_hook
    def get_options(self):
        # HasOptions hook: exposes the subclass-defined OPTIONS list.
        return self.OPTIONS  # type: ignore

    @PM.final
    @no_type_check  # https://github.com/python/mypy/issues/7806
    def get_option(self, option):
        # Reads a module option through the mgr handle. Marked final:
        # add_plugin() does not require a hook implementation for it.
        return self.mgr.get_module_option(option)

    @PM.final
    @no_type_check  # https://github.com/python/mypy/issues/7806
    def set_option(self, option, value):
        # Persists a module option through the mgr handle (final, see above).
        self.mgr.set_module_option(option, value)

    @PM.add_hook
    @no_type_check  # https://github.com/python/mypy/issues/7806
    def register_commands(self):
        # HasCommands hook: registers every declared command with this
        # plugin instance bound as the handler receiver.
        for cmd in self.COMMANDS:
            cmd.register(instance=self)
py
|
null |
ceph-main/src/pybind/mgr/dashboard/plugins/ttl_cache.py
|
"""
This is a minimal implementation of TTL-ed lru_cache function.
Based on Python 3 functools and backports.functools_lru_cache.
"""
from collections import OrderedDict
from functools import wraps
from threading import RLock
from time import time
from typing import Any, Dict
try:
from typing import Tuple
except ImportError:
pass # For typing only
class TTLCache:
    """Thread-safe LRU cache whose entries expire ``ttl`` seconds after
    insertion. Tracks hit/miss/expiry counters for info()."""

    class CachedValue:
        # Record pairing a cached value with its insertion timestamp.
        def __init__(self, value, timestamp):
            self.value = value
            self.timestamp = timestamp

    def __init__(self, reference, ttl, maxsize=128):
        self.reference = reference  # label used in info() output
        self.ttl: int = ttl  # entry lifetime in seconds
        self.maxsize = maxsize  # maximum number of entries
        self.cache: OrderedDict[Tuple[Any], TTLCache.CachedValue] = OrderedDict()
        self.hits = 0
        self.misses = 0
        self.expired = 0
        self.rlock = RLock()

    def __getitem__(self, key):
        """Return the cached value; raise KeyError if missing or expired."""
        with self.rlock:
            if key not in self.cache:
                self.misses += 1
                raise KeyError(f'"{key}" is not set')
            cached_value = self.cache[key]
            if time() - cached_value.timestamp >= self.ttl:
                # Expired entries count as both expired and missed.
                del self.cache[key]
                self.expired += 1
                self.misses += 1
                raise KeyError(f'"{key}" is not set')
            self.hits += 1
            return cached_value.value

    def __setitem__(self, key, value):
        """Insert or refresh ``key``, evicting the LRU entry when full.

        Fix: overwriting an existing key does not grow the cache, so it
        must not evict an unrelated entry (the previous version popped the
        oldest entry unconditionally whenever the cache was full).
        """
        with self.rlock:
            if key in self.cache:
                cached_value = self.cache[key]
                if time() - cached_value.timestamp >= self.ttl:
                    self.expired += 1
                # Remove so the refreshed entry moves to the MRU position.
                del self.cache[key]
            elif len(self.cache) >= self.maxsize:
                # Only a brand-new key can exceed maxsize: drop the oldest.
                self.cache.popitem(last=False)
            self.cache[key] = TTLCache.CachedValue(value, time())

    def clear(self):
        """Drop all entries; statistics counters are kept."""
        with self.rlock:
            self.cache.clear()

    def info(self) -> str:
        """One-line statistics summary for this cache."""
        return (f'cache={self.reference} hits={self.hits}, misses={self.misses},'
                f'expired={self.expired}, maxsize={self.maxsize}, currsize={len(self.cache)}')
class CacheManager:
    """Keeps one shared, named TTLCache instance per reference string."""
    caches: Dict[str, TTLCache] = {}

    @classmethod
    def get(cls, reference: str, ttl=30, maxsize=128):
        # Return the existing cache for this reference, or lazily create
        # one with the given ttl/maxsize.
        existing = cls.caches.get(reference)
        if existing is not None:
            return existing
        created = TTLCache(reference, ttl, maxsize)
        cls.caches[reference] = created
        return created
def ttl_cache(ttl, maxsize=128, typed=False, label: str = ''):
    """Decorator caching results in a shared TTLCache.

    The cache is registered under ``label`` (or the function name when no
    label is given), so it can be cleared via ttl_cache_invalidator.
    ``typed=True`` is not supported.
    """
    if typed is not False:
        raise NotImplementedError("typed caching not supported")

    def decorating_function(function):
        cache = CacheManager.get(label or function.__name__, ttl, maxsize)

        @wraps(function)
        def wrapper(*args, **kwargs):
            key = args + tuple(kwargs.items())
            try:
                return cache[key]
            except KeyError:
                # Miss or expired entry: compute and store.
                result = function(*args, **kwargs)
                cache[key] = result
                return result

        return wrapper

    return decorating_function
def ttl_cache_invalidator(label: str):
    """Decorator that clears the TTLCache named ``label`` after each call."""
    def decorating_function(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            result = function(*args, **kwargs)
            # Invalidate the associated cache once the call has succeeded.
            CacheManager.get(label).clear()
            return result

        return wrapper

    return decorating_function
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/__init__.py
|
# -*- coding: utf-8 -*-
| 24 | 11.5 | 23 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/_paginate.py
|
from typing import Any, Dict, List
from ..exceptions import DashboardException
class ListPaginator:
    """Applies substring search, sorting and offset/limit pagination to an
    in-memory list of (possibly nested) dicts."""
    # pylint: disable=W0102
    def __init__(self, offset: int, limit: int, sort: str, search: str,
                 input_list: List[Any], default_sort: str,
                 searchable_params: List[str] = [], sortable_params: List[str] = []):
        """
        :param limit: page size; -1 is a special value meaning "all items".
        :param sort: '+key' or '-key'; falls back to ``default_sort``.
        :raises DashboardException: if ``limit`` is smaller than -1.
        """
        self.offset = offset
        if limit < -1:
            raise DashboardException(msg=f'Wrong limit value {limit}', code=400)
        self.limit = limit
        self.sort = sort
        self.search = search
        self.input_list = input_list
        self.default_sort = default_sort
        self.searchable_params = searchable_params
        self.sortable_params = sortable_params
        self.count = len(self.input_list)

    def get_count(self):
        """Number of items after the last filtering run of list()."""
        return self.count

    def find_value(self, item: Dict[str, Any], key: str):
        """Look up a possibly dot-nested key; returns '' when missing."""
        # dot separated keys to lookup nested values
        keys = key.split('.')
        value = item
        for nested_key in keys:
            # Guard against non-dict intermediates: 'in' on a string would
            # do substring matching and then crash on the indexing below.
            if isinstance(value, dict) and nested_key in value:
                value = value[nested_key]
            else:
                return ''
        return value

    def list(self):
        """Generator yielding the filtered, sorted, paginated items."""
        end = self.offset + self.limit
        # '-1' is a special number to refer to all items in list
        if self.limit == -1:
            end = len(self.input_list)
        if not self.sort:
            self.sort = self.default_sort
        desc = self.sort[0] == '-'
        sort_by = self.sort[1:]
        if sort_by not in self.sortable_params:
            sort_by = self.default_sort[1:]
        # trim down by search
        trimmed_list = []
        if self.search:
            for item in self.input_list:
                for searchable_param in self.searchable_params:
                    value = self.find_value(item, searchable_param)
                    if isinstance(value, str) and self.search in value:
                        # Stop at the first matching param so an item that
                        # matches several params is not yielded twice.
                        trimmed_list.append(item)
                        break
        else:
            trimmed_list = self.input_list

        def sort_key(item):
            return self.find_value(item, sort_by)

        sorted_list = sorted(trimmed_list, key=sort_key, reverse=desc)
        self.count = len(sorted_list)
        for item in sorted_list[self.offset:end]:
            yield item
| 2,314 | 31.152778 | 85 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/access_control.py
|
# -*- coding: utf-8 -*-
# pylint: disable=too-many-arguments,too-many-return-statements
# pylint: disable=too-many-branches, too-many-locals, too-many-statements
import errno
import json
import logging
import re
import threading
import time
from datetime import datetime, timedelta
from string import ascii_lowercase, ascii_uppercase, digits, punctuation
from typing import List, Optional, Sequence
import bcrypt
from mgr_module import CLICheckNonemptyFileInput, CLIReadCommand, CLIWriteCommand
from mgr_util import password_hash
from .. import mgr
from ..exceptions import PasswordPolicyException, PermissionNotValid, \
PwdExpirationDateNotValid, RoleAlreadyExists, RoleDoesNotExist, \
RoleIsAssociatedWithUser, RoleNotInUser, ScopeNotInRole, ScopeNotValid, \
UserAlreadyExists, UserDoesNotExist
from ..security import Permission, Scope
from ..settings import Settings
logger = logging.getLogger('access_control')
DEFAULT_FILE_DESC = 'password/secret'
_P = Permission # short alias
class PasswordPolicy(object):
    """Validates a new password against the configurable password policy.

    Every individual check is toggled by a PWD_POLICY_* setting; a check
    whose setting is disabled always passes.
    """

    def __init__(self, password, username=None, old_password=None):
        """
        :param password: The new plain password.
        :type password: str
        :param username: The name of the user.
        :type username: str | None
        :param old_password: The old plain password.
        :type old_password: str | None
        """
        self.password = password
        self.username = username
        self.old_password = old_password
        # Comma-separated exclusion list from the settings.
        self.forbidden_words = Settings.PWD_POLICY_EXCLUSION_LIST.split(',')
        self.complexity_credits = 0

    @staticmethod
    def _check_if_contains_word(password, word):
        # Case-insensitive search; 'word' may be a regex alternation.
        return re.compile('(?:{0})'.format(word),
                          flags=re.IGNORECASE).search(password)

    def check_password_complexity(self):
        """Score the password: upper=2, lower=1, digit=1, punctuation=3,
        any other character=5 credits. Returns the configured minimum
        (i.e. always passes) when the complexity check is disabled."""
        if not Settings.PWD_POLICY_CHECK_COMPLEXITY_ENABLED:
            return Settings.PWD_POLICY_MIN_COMPLEXITY
        digit_credit = 1
        small_letter_credit = 1
        big_letter_credit = 2
        special_character_credit = 3
        other_character_credit = 5
        self.complexity_credits = 0
        for ch in self.password:
            if ch in ascii_uppercase:
                self.complexity_credits += big_letter_credit
            elif ch in ascii_lowercase:
                self.complexity_credits += small_letter_credit
            elif ch in digits:
                self.complexity_credits += digit_credit
            elif ch in punctuation:
                self.complexity_credits += special_character_credit
            else:
                self.complexity_credits += other_character_credit
        return self.complexity_credits

    def check_is_old_password(self):
        # Truthy when the new password equals the old one (check enabled).
        if not Settings.PWD_POLICY_CHECK_OLDPWD_ENABLED:
            return False
        return self.old_password and self.password == self.old_password

    def check_if_contains_username(self):
        # Truthy (a regex match) when the username occurs in the password.
        if not Settings.PWD_POLICY_CHECK_USERNAME_ENABLED:
            return False
        if not self.username:
            return False
        return self._check_if_contains_word(self.password, self.username)

    def check_if_contains_forbidden_words(self):
        # Truthy (a regex match) when any exclusion-list word occurs.
        if not Settings.PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED:
            return False
        return self._check_if_contains_word(self.password,
                                            '|'.join(self.forbidden_words))

    def check_if_sequential_characters(self):
        # True when three consecutive chars have consecutive code points,
        # e.g. 'abc' or '123'.
        if not Settings.PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED:
            return False
        for i in range(1, len(self.password) - 1):
            if ord(self.password[i - 1]) + 1 == ord(self.password[i])\
               == ord(self.password[i + 1]) - 1:
                return True
        return False

    def check_if_repetitive_characters(self):
        # True when the same character occurs three times in a row.
        if not Settings.PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED:
            return False
        for i in range(1, len(self.password) - 1):
            if self.password[i - 1] == self.password[i] == self.password[i + 1]:
                return True
        return False

    def check_password_length(self):
        # True when the password meets the configured minimum length.
        if not Settings.PWD_POLICY_CHECK_LENGTH_ENABLED:
            return True
        return len(self.password) >= Settings.PWD_POLICY_MIN_LENGTH

    def check_all(self):
        """
        Perform all password policy checks.
        :raise PasswordPolicyException: If a password policy check fails.
        """
        if not Settings.PWD_POLICY_ENABLED:
            return
        if self.check_password_complexity() < Settings.PWD_POLICY_MIN_COMPLEXITY:
            raise PasswordPolicyException('Password is too weak.')
        if not self.check_password_length():
            raise PasswordPolicyException('Password is too weak.')
        if self.check_is_old_password():
            raise PasswordPolicyException('Password must not be the same as the previous one.')
        if self.check_if_contains_username():
            raise PasswordPolicyException('Password must not contain username.')
        result = self.check_if_contains_forbidden_words()
        if result:
            raise PasswordPolicyException('Password must not contain the keyword "{}".'.format(
                result.group(0)))
        if self.check_if_repetitive_characters():
            raise PasswordPolicyException('Password must not contain repetitive characters.')
        if self.check_if_sequential_characters():
            raise PasswordPolicyException('Password must not contain sequential characters.')
class Role(object):
    """A named set of per-scope permissions.

    ``scopes_permissions`` maps a security scope name to the list of
    permissions granted on that scope.
    """

    def __init__(self, name, description=None, scope_permissions=None):
        self.name = name
        self.description = description
        if scope_permissions is None:
            self.scopes_permissions = {}
        else:
            self.scopes_permissions = scope_permissions

    def __hash__(self):
        # Roles are identified solely by their name (see __eq__).
        return hash(self.name)

    def __eq__(self, other):
        # Fix: defer to the other operand for foreign types instead of
        # failing with AttributeError on 'other.name'.
        if not isinstance(other, Role):
            return NotImplemented
        return self.name == other.name

    def set_scope_permissions(self, scope, permissions):
        """Validate and set the permissions of a single scope.

        :raises ScopeNotValid: if ``scope`` is unknown.
        :raises PermissionNotValid: if any permission is unknown.
        """
        if not Scope.valid_scope(scope):
            raise ScopeNotValid(scope)
        for perm in permissions:
            if not Permission.valid_permission(perm):
                raise PermissionNotValid(perm)
        # Fix: store a sorted copy instead of sorting (mutating) the
        # caller's list in place.
        self.scopes_permissions[scope] = sorted(permissions)

    def del_scope_permissions(self, scope):
        """Remove all permissions of ``scope``.

        :raises ScopeNotInRole: if the scope is not part of this role.
        """
        if scope not in self.scopes_permissions:
            raise ScopeNotInRole(scope, self.name)
        del self.scopes_permissions[scope]

    def reset_scope_permissions(self):
        """Drop the permissions of every scope."""
        self.scopes_permissions = {}

    def authorize(self, scope, permissions):
        """Return True if this role grants all ``permissions`` on ``scope``."""
        role_perms = self.scopes_permissions.get(scope)
        if role_perms is None:
            return False
        return all(perm in role_perms for perm in permissions)

    def to_dict(self):
        """Serialize to a plain dict (inverse of ``from_dict``)."""
        return {
            'name': self.name,
            'description': self.description,
            'scopes_permissions': self.scopes_permissions
        }

    @classmethod
    def from_dict(cls, r_dict):
        """Deserialize a role previously produced by ``to_dict``."""
        return Role(r_dict['name'], r_dict['description'],
                    r_dict['scopes_permissions'])
# Static pre-defined system roles.
# These roles cannot be deleted nor updated.

# The admin role provides all permissions for all scopes.
ADMIN_ROLE = Role(
    'administrator', 'allows full permissions for all security scopes', {
        scope_name: Permission.all_permissions()
        for scope_name in Scope.all_scopes()
    })

# The read-only role provides read-only permission for all scopes except
# dashboard settings and config-opt.
READ_ONLY_ROLE = Role(
    'read-only',
    'allows read permission for all security scope except dashboard settings and config-opt', {
        scope_name: [_P.READ] for scope_name in Scope.all_scopes()
        if scope_name not in (Scope.DASHBOARD_SETTINGS, Scope.CONFIG_OPT)
    })

# The block manager role provides all permissions for block related scopes.
BLOCK_MGR_ROLE = Role(
    'block-manager', 'allows full permissions for rbd-image, rbd-mirroring, and iscsi scopes', {
        Scope.RBD_IMAGE: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.POOL: [_P.READ],
        Scope.ISCSI: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.RBD_MIRRORING: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.GRAFANA: [_P.READ],
    })

# The RadosGW manager role provides all permissions for the rgw scope.
RGW_MGR_ROLE = Role(
    'rgw-manager', 'allows full permissions for the rgw scope', {
        Scope.RGW: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.GRAFANA: [_P.READ],
    })

# The cluster manager role provides all permissions for hosts, OSDs,
# monitors, managers, config options and logs.
CLUSTER_MGR_ROLE = Role(
    'cluster-manager', """allows full permissions for the hosts, osd, mon, mgr,
and config-opt scopes""", {
        Scope.HOSTS: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.OSD: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.MONITOR: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.MANAGER: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.CONFIG_OPT: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.LOG: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.GRAFANA: [_P.READ],
    })

# The pool manager role provides all permissions for pool related scopes.
POOL_MGR_ROLE = Role(
    'pool-manager', 'allows full permissions for the pool scope', {
        Scope.POOL: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.GRAFANA: [_P.READ],
    })

# The CephFS manager role provides all permissions for CephFS related scopes.
CEPHFS_MGR_ROLE = Role(
    'cephfs-manager', 'allows full permissions for the cephfs scope', {
        Scope.CEPHFS: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.GRAFANA: [_P.READ],
    })

# The NFS-Ganesha manager role provides all permissions for the NFS export
# related scopes (plus the cephfs/rgw scopes the exports are built on).
GANESHA_MGR_ROLE = Role(
    'ganesha-manager', 'allows full permissions for the nfs-ganesha scope', {
        Scope.NFS_GANESHA: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.CEPHFS: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.RGW: [_P.READ, _P.CREATE, _P.UPDATE, _P.DELETE],
        Scope.GRAFANA: [_P.READ],
    })

# Registry of the built-in roles, keyed by role name. Merged with the
# user-defined roles stored in AccessControlDB when roles are looked up.
SYSTEM_ROLES = {
    ADMIN_ROLE.name: ADMIN_ROLE,
    READ_ONLY_ROLE.name: READ_ONLY_ROLE,
    BLOCK_MGR_ROLE.name: BLOCK_MGR_ROLE,
    RGW_MGR_ROLE.name: RGW_MGR_ROLE,
    CLUSTER_MGR_ROLE.name: CLUSTER_MGR_ROLE,
    POOL_MGR_ROLE.name: POOL_MGR_ROLE,
    CEPHFS_MGR_ROLE.name: CEPHFS_MGR_ROLE,
    GANESHA_MGR_ROLE.name: GANESHA_MGR_ROLE,
}
class User(object):
    """A dashboard user account: credentials, roles and password-policy state."""

    def __init__(self, username, password, name=None, email=None, roles=None,
                 last_update=None, enabled=True, pwd_expiration_date=None,
                 pwd_update_required=False):
        self.username = username
        # 'password' holds a password *hash*, not plain text (see
        # set_password()/compare_password() and AccessControlDB.create_user()).
        self.password = password
        self.name = name
        self.email = email
        # Consecutive failed login counter, managed by AccessControlDB.
        self.invalid_auth_attempt = 0
        if roles is None:
            self.roles = set()
        else:
            self.roles = roles
        if last_update is None:
            self.refresh_last_update()
        else:
            self.last_update = last_update
        self._enabled = enabled
        self.pwd_expiration_date = pwd_expiration_date
        if self.pwd_expiration_date is None:
            self.refresh_pwd_expiration_date()
        self.pwd_update_required = pwd_update_required

    def refresh_last_update(self):
        # last_update is compared against JWT 'iat' (JwtManager.get_user):
        # bumping it invalidates tokens issued before the change.
        self.last_update = int(time.time())

    def refresh_pwd_expiration_date(self):
        if Settings.USER_PWD_EXPIRATION_SPAN > 0:
            # NOTE(review): mktime() interprets the tuple as *local* time
            # although utcnow() is UTC — confirm this mismatch is intended.
            expiration_date = datetime.utcnow() + timedelta(
                days=Settings.USER_PWD_EXPIRATION_SPAN)
            self.pwd_expiration_date = int(time.mktime(expiration_date.timetuple()))
        else:
            # A span of 0 (or less) means passwords never expire.
            self.pwd_expiration_date = None

    @property
    def enabled(self):
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        self._enabled = value
        self.refresh_last_update()

    def set_password(self, password):
        """Hash and store a new plain-text password."""
        self.set_password_hash(password_hash(password))

    def set_password_hash(self, hashed_password):
        """Store an already-hashed password and reset the related state."""
        self.invalid_auth_attempt = 0
        self.password = hashed_password
        self.refresh_last_update()
        self.refresh_pwd_expiration_date()
        self.pwd_update_required = False

    def compare_password(self, password):
        """
        Compare the specified password with the user password.
        :param password: The plain password to check.
        :type password: str
        :return: `True` if the passwords are equal, otherwise `False`.
        :rtype: bool
        """
        # Re-hash the candidate using the stored hash as the salt source.
        pass_hash = password_hash(password, salt_password=self.password)
        return pass_hash == self.password

    def is_pwd_expired(self):
        if self.pwd_expiration_date:
            current_time = int(time.mktime(datetime.utcnow().timetuple()))
            return self.pwd_expiration_date < current_time
        return False

    def set_roles(self, roles):
        """Replace the role set of this user."""
        self.roles = set(roles)
        self.refresh_last_update()

    def add_roles(self, roles):
        self.roles = self.roles.union(set(roles))
        self.refresh_last_update()

    def del_roles(self, roles):
        """Remove roles; raises RoleNotInUser before removing anything."""
        for role in roles:
            if role not in self.roles:
                raise RoleNotInUser(role.name, self.username)
        self.roles.difference_update(set(roles))
        self.refresh_last_update()

    def authorize(self, scope, permissions):
        # A user that still has to change their password is not authorized
        # for anything.
        if self.pwd_update_required:
            return False
        for role in self.roles:
            if role.authorize(scope, permissions):
                return True
        return False

    def permissions_dict(self):
        # type: () -> dict
        # Union of per-scope permissions across all roles of the user.
        perms = {}  # type: dict
        for role in self.roles:
            for scope, perms_list in role.scopes_permissions.items():
                if scope in perms:
                    perms_tmp = set(perms[scope]).union(set(perms_list))
                    perms[scope] = list(perms_tmp)
                else:
                    perms[scope] = perms_list
        return perms

    def to_dict(self):
        """Serialize the user (with role *names* only) for persistence."""
        return {
            'username': self.username,
            'password': self.password,
            'roles': sorted([r.name for r in self.roles]),
            'name': self.name,
            'email': self.email,
            'lastUpdate': self.last_update,
            'enabled': self.enabled,
            'pwdExpirationDate': self.pwd_expiration_date,
            'pwdUpdateRequired': self.pwd_update_required
        }

    @classmethod
    def from_dict(cls, u_dict, roles):
        """Re-create a User; ``roles`` maps role names to Role objects."""
        return User(u_dict['username'], u_dict['password'], u_dict['name'],
                    u_dict['email'], {roles[r] for r in u_dict['roles']},
                    u_dict['lastUpdate'], u_dict['enabled'],
                    u_dict['pwdExpirationDate'], u_dict['pwdUpdateRequired'])
class AccessControlDB(object):
    """Thread-safe, mgr-store-backed database of users and custom roles.

    System roles (SYSTEM_ROLES) are *not* stored here; they are merged in
    by the callers where needed.
    """

    VERSION = 2
    ACDB_CONFIG_KEY = "accessdb_v"

    def __init__(self, version, users, roles):
        self.users = users
        self.version = version
        self.roles = roles
        # Reentrant lock guarding all read/write access to users/roles.
        self.lock = threading.RLock()

    def create_role(self, name, description=None):
        """Create a custom role; names must not collide with system roles."""
        with self.lock:
            if name in SYSTEM_ROLES or name in self.roles:
                raise RoleAlreadyExists(name)
            role = Role(name, description)
            self.roles[name] = role
            return role

    def get_role(self, name):
        """Return a *custom* role (system roles are not looked up here)."""
        with self.lock:
            if name not in self.roles:
                raise RoleDoesNotExist(name)
            return self.roles[name]

    def increment_attempt(self, username):
        # Unknown usernames are silently ignored.
        with self.lock:
            if username in self.users:
                self.users[username].invalid_auth_attempt += 1

    def reset_attempt(self, username):
        with self.lock:
            if username in self.users:
                self.users[username].invalid_auth_attempt = 0

    def get_attempt(self, username):
        with self.lock:
            try:
                return self.users[username].invalid_auth_attempt
            except KeyError:
                return 0

    def delete_role(self, name):
        """Delete a custom role; refuses if any user still holds it."""
        with self.lock:
            if name not in self.roles:
                raise RoleDoesNotExist(name)
            role = self.roles[name]
            # check if role is not associated with a user
            for username, user in self.users.items():
                if role in user.roles:
                    raise RoleIsAssociatedWithUser(name, username)
            del self.roles[name]

    def create_user(self, username, password, name, email, enabled=True,
                    pwd_expiration_date=None, pwd_update_required=False):
        """Create a user; the plain password is hashed before storing."""
        logger.debug("creating user: username=%s", username)
        with self.lock:
            if username in self.users:
                raise UserAlreadyExists(username)
            # An expiration date in the past is rejected outright.
            if pwd_expiration_date and \
                    (pwd_expiration_date < int(time.mktime(datetime.utcnow().timetuple()))):
                raise PwdExpirationDateNotValid()
            user = User(username, password_hash(password), name, email, enabled=enabled,
                        pwd_expiration_date=pwd_expiration_date,
                        pwd_update_required=pwd_update_required)
            self.users[username] = user
            return user

    def get_user(self, username):
        with self.lock:
            if username not in self.users:
                raise UserDoesNotExist(username)
            return self.users[username]

    def delete_user(self, username):
        with self.lock:
            if username not in self.users:
                raise UserDoesNotExist(username)
            del self.users[username]

    def update_users_with_roles(self, role):
        # Bump last_update on every user that holds 'role', so sessions
        # issued before the role change are no longer considered current.
        with self.lock:
            if not role:
                return
            for _, user in self.users.items():
                if role in user.roles:
                    user.refresh_last_update()

    def save(self):
        """Persist the whole DB as JSON in the mgr key/value store."""
        with self.lock:
            db = {
                'users': {un: u.to_dict() for un, u in self.users.items()},
                'roles': {rn: r.to_dict() for rn, r in self.roles.items()},
                'version': self.version
            }
            mgr.set_store(self.accessdb_config_key(), json.dumps(db))

    @classmethod
    def accessdb_config_key(cls, version=None):
        if version is None:
            version = cls.VERSION
        return "{}{}".format(cls.ACDB_CONFIG_KEY, version)

    def check_and_update_db(self):
        """Migrate data from older on-disk DB versions into this instance."""
        logger.debug("Checking for previous DB versions")

        def check_migrate_v1_to_current():
            # Check if version 1 exists in the DB and migrate it to current version
            v1_db = mgr.get_store(self.accessdb_config_key(1))
            if v1_db:
                logger.debug("Found database v1 credentials")
                v1_db = json.loads(v1_db)
                # v2 added the enabled/pwd* user fields; backfill defaults.
                for user, _ in v1_db['users'].items():
                    v1_db['users'][user]['enabled'] = True
                    v1_db['users'][user]['pwdExpirationDate'] = None
                    v1_db['users'][user]['pwdUpdateRequired'] = False
                self.roles = {rn: Role.from_dict(r) for rn, r in v1_db.get('roles', {}).items()}
                self.users = {un: User.from_dict(u, dict(self.roles, **SYSTEM_ROLES))
                              for un, u in v1_db.get('users', {}).items()}
                self.save()

        check_migrate_v1_to_current()

    @classmethod
    def load(cls):
        """Load the current-version DB, creating (and migrating) if absent."""
        logger.info("Loading user roles DB version=%s", cls.VERSION)
        json_db = mgr.get_store(cls.accessdb_config_key())
        if json_db is None:
            logger.debug("No DB v%s found, creating new...", cls.VERSION)
            db = cls(cls.VERSION, {}, {})
            # check if we can update from a previous version database
            db.check_and_update_db()
            return db
        dict_db = json.loads(json_db)
        roles = {rn: Role.from_dict(r)
                 for rn, r in dict_db.get('roles', {}).items()}
        # Role references are resolved against custom + system roles.
        users = {un: User.from_dict(u, dict(roles, **SYSTEM_ROLES))
                 for un, u in dict_db.get('users', {}).items()}
        return cls(dict_db['version'], users, roles)
def load_access_control_db():
    """Load (or create) the ACL database and publish it on the mgr instance."""
    mgr.ACCESS_CTRL_DB = AccessControlDB.load()  # type: ignore
# CLI dashboard access control scope commands
@CLIWriteCommand('dashboard set-login-credentials')
@CLICheckNonemptyFileInput(desc=DEFAULT_FILE_DESC)
def set_login_credentials_cmd(_, username: str, inbuf: str):
    '''
    Set the login credentials. Password read from -i <file>
    '''
    password = inbuf
    try:
        # Update the password of an existing user ...
        user = mgr.ACCESS_CTRL_DB.get_user(username)
        user.set_password(password)
    except UserDoesNotExist:
        # ... or create a brand-new administrator account.
        user = mgr.ACCESS_CTRL_DB.create_user(username, password, None, None)
        user.set_roles([ADMIN_ROLE])
    mgr.ACCESS_CTRL_DB.save()
    return 0, '''\
******************************************************************
***          WARNING: this command is deprecated.              ***
*** Please use the ac-user-* related commands to manage users. ***
******************************************************************
Username and password updated''', ''
@CLIReadCommand('dashboard ac-role-show')
def ac_role_show_cmd(_, rolename: Optional[str] = None):
    '''
    Show role info
    '''
    if not rolename:
        # No role given: list the names of all custom and system roles.
        all_roles = dict(mgr.ACCESS_CTRL_DB.roles, **SYSTEM_ROLES)
        return 0, json.dumps(list(all_roles.keys())), ''
    try:
        role = mgr.ACCESS_CTRL_DB.get_role(rolename)
    except RoleDoesNotExist as ex:
        # Fall back to the built-in system roles before giving up.
        if rolename not in SYSTEM_ROLES:
            return -errno.ENOENT, '', str(ex)
        role = SYSTEM_ROLES[rolename]
    return 0, json.dumps(role.to_dict()), ''
@CLIWriteCommand('dashboard ac-role-create')
def ac_role_create_cmd(_, rolename: str, description: Optional[str] = None):
    '''
    Create a new access control role
    '''
    try:
        new_role = mgr.ACCESS_CTRL_DB.create_role(rolename, description)
    except RoleAlreadyExists as ex:
        return -errno.EEXIST, '', str(ex)
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(new_role.to_dict()), ''
@CLIWriteCommand('dashboard ac-role-delete')
def ac_role_delete_cmd(_, rolename: str):
    '''
    Delete an access control role
    '''
    try:
        mgr.ACCESS_CTRL_DB.delete_role(rolename)
    except RoleIsAssociatedWithUser as ex:
        return -errno.EPERM, '', str(ex)
    except RoleDoesNotExist as ex:
        # System roles are never stored in the DB, so the lookup fails for
        # them too; report that they cannot be deleted at all.
        if rolename in SYSTEM_ROLES:
            return -errno.EPERM, '', "Cannot delete system role '{}'".format(rolename)
        return -errno.ENOENT, '', str(ex)
    mgr.ACCESS_CTRL_DB.save()
    return 0, "Role '{}' deleted".format(rolename), ""
@CLIWriteCommand('dashboard ac-role-add-scope-perms')
def ac_role_add_scope_perms_cmd(_,
                                rolename: str,
                                scopename: str,
                                permissions: Sequence[str]):
    '''
    Add the scope permissions for a role
    '''
    try:
        role = mgr.ACCESS_CTRL_DB.get_role(rolename)
        cleaned_perms = [perm.strip() for perm in permissions]
        role.set_scope_permissions(scopename, cleaned_perms)
        # Touch affected users so stale sessions pick up the change.
        mgr.ACCESS_CTRL_DB.update_users_with_roles(role)
        mgr.ACCESS_CTRL_DB.save()
        return 0, json.dumps(role.to_dict()), ''
    except RoleDoesNotExist as ex:
        if rolename in SYSTEM_ROLES:
            return -errno.EPERM, '', "Cannot update system role '{}'".format(rolename)
        return -errno.ENOENT, '', str(ex)
    except ScopeNotValid as ex:
        return -errno.EINVAL, '', str(ex) + "\n Possible values: {}".format(Scope.all_scopes())
    except PermissionNotValid as ex:
        return -errno.EINVAL, '', str(ex) + "\n Possible values: {}".format(
            Permission.all_permissions())
@CLIWriteCommand('dashboard ac-role-del-scope-perms')
def ac_role_del_scope_perms_cmd(_, rolename: str, scopename: str):
    '''
    Delete the scope permissions for a role
    '''
    try:
        role = mgr.ACCESS_CTRL_DB.get_role(rolename)
    except RoleDoesNotExist as ex:
        if rolename in SYSTEM_ROLES:
            return -errno.EPERM, '', "Cannot update system role '{}'".format(rolename)
        return -errno.ENOENT, '', str(ex)
    try:
        role.del_scope_permissions(scopename)
    except ScopeNotInRole as ex:
        return -errno.ENOENT, '', str(ex)
    # Touch affected users so stale sessions pick up the change.
    mgr.ACCESS_CTRL_DB.update_users_with_roles(role)
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(role.to_dict()), ''
@CLIReadCommand('dashboard ac-user-show')
def ac_user_show_cmd(_, username: Optional[str] = None):
    '''
    Show user info
    '''
    if not username:
        # No username given: list all known usernames.
        return 0, json.dumps(list(mgr.ACCESS_CTRL_DB.users.keys())), ''
    try:
        user = mgr.ACCESS_CTRL_DB.get_user(username)
    except UserDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    return 0, json.dumps(user.to_dict()), ''
@CLIWriteCommand('dashboard ac-user-create')
@CLICheckNonemptyFileInput(desc=DEFAULT_FILE_DESC)
def ac_user_create_cmd(_, username: str, inbuf: str,
                       rolename: Optional[str] = None,
                       name: Optional[str] = None,
                       email: Optional[str] = None,
                       enabled: bool = True,
                       force_password: bool = False,
                       pwd_expiration_date: Optional[int] = None,
                       pwd_update_required: bool = False):
    '''
    Create a user. Password read from -i <file>
    '''
    password = inbuf
    try:
        role = mgr.ACCESS_CTRL_DB.get_role(rolename) if rolename else None
    except RoleDoesNotExist as ex:
        # Custom role lookup failed; fall back to the built-in system roles.
        if rolename not in SYSTEM_ROLES:
            return -errno.ENOENT, '', str(ex)
        role = SYSTEM_ROLES[rolename]
    try:
        if not force_password:
            # Enforce the configured password policy unless explicitly bypassed.
            pw_check = PasswordPolicy(password, username)
            pw_check.check_all()
        user = mgr.ACCESS_CTRL_DB.create_user(username, password, name, email,
                                              enabled, pwd_expiration_date,
                                              pwd_update_required)
    except PasswordPolicyException as ex:
        return -errno.EINVAL, '', str(ex)
    except UserAlreadyExists as ex:
        # Creating an already existing user is treated as success (exit 0).
        return 0, str(ex), ''
    if role:
        user.set_roles([role])
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(user.to_dict()), ''
@CLIWriteCommand('dashboard ac-user-enable')
def ac_user_enable(_, username: str):
    '''
    Enable a user
    '''
    try:
        user = mgr.ACCESS_CTRL_DB.get_user(username)
    except UserDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    user.enabled = True
    # Re-enabling also clears the failed-login counter.
    mgr.ACCESS_CTRL_DB.reset_attempt(username)
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(user.to_dict()), ''
@CLIWriteCommand('dashboard ac-user-disable')
def ac_user_disable(_, username: str):
    '''
    Disable a user
    '''
    try:
        user = mgr.ACCESS_CTRL_DB.get_user(username)
    except UserDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    user.enabled = False
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(user.to_dict()), ''
@CLIWriteCommand('dashboard ac-user-delete')
def ac_user_delete_cmd(_, username: str):
    '''
    Delete user
    '''
    try:
        mgr.ACCESS_CTRL_DB.delete_user(username)
    except UserDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    mgr.ACCESS_CTRL_DB.save()
    return 0, "User '{}' deleted".format(username), ""
@CLIWriteCommand('dashboard ac-user-set-roles')
def ac_user_set_roles_cmd(_, username: str, roles: Sequence[str]):
    '''
    Set user roles
    '''
    # Resolve every role name against the DB first, then the system roles.
    resolved: List[Role] = []
    for role_name in roles:
        try:
            resolved.append(mgr.ACCESS_CTRL_DB.get_role(role_name))
        except RoleDoesNotExist as ex:
            if role_name not in SYSTEM_ROLES:
                return -errno.ENOENT, '', str(ex)
            resolved.append(SYSTEM_ROLES[role_name])
    try:
        user = mgr.ACCESS_CTRL_DB.get_user(username)
    except UserDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    user.set_roles(resolved)
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(user.to_dict()), ''
@CLIWriteCommand('dashboard ac-user-add-roles')
def ac_user_add_roles_cmd(_, username: str, roles: Sequence[str]):
    '''
    Add roles to user
    '''
    # Resolve every role name against the DB first, then the system roles.
    resolved: List[Role] = []
    for role_name in roles:
        try:
            resolved.append(mgr.ACCESS_CTRL_DB.get_role(role_name))
        except RoleDoesNotExist as ex:
            if role_name not in SYSTEM_ROLES:
                return -errno.ENOENT, '', str(ex)
            resolved.append(SYSTEM_ROLES[role_name])
    try:
        user = mgr.ACCESS_CTRL_DB.get_user(username)
    except UserDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    user.add_roles(resolved)
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(user.to_dict()), ''
@CLIWriteCommand('dashboard ac-user-del-roles')
def ac_user_del_roles_cmd(_, username: str, roles: Sequence[str]):
    '''
    Delete roles from user
    '''
    # Resolve every role name against the DB first, then the system roles.
    resolved: List[Role] = []
    for role_name in roles:
        try:
            resolved.append(mgr.ACCESS_CTRL_DB.get_role(role_name))
        except RoleDoesNotExist as ex:
            if role_name not in SYSTEM_ROLES:
                return -errno.ENOENT, '', str(ex)
            resolved.append(SYSTEM_ROLES[role_name])
    try:
        user = mgr.ACCESS_CTRL_DB.get_user(username)
        user.del_roles(resolved)
    except UserDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    except RoleNotInUser as ex:
        return -errno.ENOENT, '', str(ex)
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(user.to_dict()), ''
@CLIWriteCommand('dashboard ac-user-set-password')
@CLICheckNonemptyFileInput(desc=DEFAULT_FILE_DESC)
def ac_user_set_password(_, username: str, inbuf: str,
                         force_password: bool = False):
    '''
    Set user password from -i <file>
    '''
    password = inbuf
    try:
        user = mgr.ACCESS_CTRL_DB.get_user(username)
    except UserDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    try:
        if not force_password:
            # NOTE(review): the policy is checked against user.name (the
            # full name), not the username — verify this is intended.
            pw_check = PasswordPolicy(password, user.name)
            pw_check.check_all()
    except PasswordPolicyException as ex:
        return -errno.EINVAL, '', str(ex)
    user.set_password(password)
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(user.to_dict()), ''
@CLIWriteCommand('dashboard ac-user-set-password-hash')
@CLICheckNonemptyFileInput(desc=DEFAULT_FILE_DESC)
def ac_user_set_password_hash(_, username: str, inbuf: str):
    '''
    Set user password bcrypt hash from -i <file>
    '''
    hashed_password = inbuf
    try:
        # make sure the hashed_password is actually a bcrypt hash
        bcrypt.checkpw(b'', hashed_password.encode('utf-8'))
    except ValueError:
        return -errno.EINVAL, '', 'Invalid password hash'
    try:
        user = mgr.ACCESS_CTRL_DB.get_user(username)
    except UserDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    user.set_password_hash(hashed_password)
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(user.to_dict()), ''
@CLIWriteCommand('dashboard ac-user-set-info')
def ac_user_set_info(_, username: str, name: str, email: str):
    '''
    Set user info
    '''
    try:
        user = mgr.ACCESS_CTRL_DB.get_user(username)
    except UserDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    # Empty values leave the corresponding field untouched.
    if name:
        user.name = name
    if email:
        user.email = email
    mgr.ACCESS_CTRL_DB.save()
    return 0, json.dumps(user.to_dict()), ''
class LocalAuthenticator(object):
    """Authenticate and authorize users against the local AccessControlDB."""

    def __init__(self):
        load_access_control_db()

    def get_user(self, username):
        return mgr.ACCESS_CTRL_DB.get_user(username)

    def authenticate(self, username, password):
        """Return the session info dict on success, None on any failure."""
        try:
            user = mgr.ACCESS_CTRL_DB.get_user(username)
            if user.password:
                # Reject disabled accounts, wrong or expired passwords.
                if user.enabled and user.compare_password(password) \
                   and not user.is_pwd_expired():
                    return {'permissions': user.permissions_dict(),
                            'pwdExpirationDate': user.pwd_expiration_date,
                            'pwdUpdateRequired': user.pwd_update_required}
        except UserDoesNotExist:
            logger.debug("User '%s' does not exist", username)
        return None

    def authorize(self, username, scope, permissions):
        user = mgr.ACCESS_CTRL_DB.get_user(username)
        return user.authorize(scope, permissions)
| 33,170 | 34.176034 | 96 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/auth.py
|
# -*- coding: utf-8 -*-
import json
import logging
import os
import threading
import time
import uuid
from base64 import b64encode
import cherrypy
import jwt
from .. import mgr
from .access_control import LocalAuthenticator, UserDoesNotExist
# Apply security-related HTTP response headers to every dashboard response.
cherrypy.config.update({
    'response.headers.server': 'Ceph-Dashboard',
    'response.headers.content-security-policy': "frame-ancestors 'self';",
    'response.headers.x-content-type-options': 'nosniff',
    'response.headers.strict-transport-security': 'max-age=63072000; includeSubDomains; preload'
})
class JwtManager(object):
    """Create, validate and block-list the JWTs used for dashboard sessions."""

    JWT_TOKEN_BLOCKLIST_KEY = "jwt_token_block_list"
    JWT_TOKEN_TTL = 28800  # default 8 hours
    JWT_ALGORITHM = 'HS256'
    _secret = None

    # Per-request/thread storage of the authenticated username.
    LOCAL_USER = threading.local()

    @staticmethod
    def _gen_secret():
        # 128 bits of randomness, base64-encoded for storage as a string.
        secret = os.urandom(16)
        return b64encode(secret).decode('utf-8')

    @classmethod
    def init(cls):
        cls.logger = logging.getLogger('jwt')  # type: ignore
        # generate a new secret if it does not exist
        secret = mgr.get_store('jwt_secret')
        if secret is None:
            secret = cls._gen_secret()
            mgr.set_store('jwt_secret', secret)
        cls._secret = secret

    @classmethod
    def gen_token(cls, username):
        """Issue a signed token for ``username`` with the configured TTL."""
        if not cls._secret:
            cls.init()
        ttl = mgr.get_module_option('jwt_token_ttl', cls.JWT_TOKEN_TTL)
        ttl = int(ttl)
        now = int(time.time())
        payload = {
            'iss': 'ceph-dashboard',
            'jti': str(uuid.uuid4()),
            'exp': now + ttl,
            'iat': now,
            'username': username
        }
        return jwt.encode(payload, cls._secret, algorithm=cls.JWT_ALGORITHM)  # type: ignore

    @classmethod
    def decode_token(cls, token):
        """Verify the signature and return the token payload dict."""
        if not cls._secret:
            cls.init()
        # PyJWT's 'algorithms' parameter expects a *list* of allowed
        # algorithms; passing a bare string only worked by accident
        # (substring matching of the header's 'alg' value).
        return jwt.decode(token, cls._secret, algorithms=[cls.JWT_ALGORITHM])  # type: ignore

    @classmethod
    def get_token_from_header(cls):
        """Extract the token from the cookie or Authorization header, else None."""
        auth_cookie_name = 'token'
        try:
            # use cookie
            return cherrypy.request.cookie[auth_cookie_name].value
        except KeyError:
            try:
                # fall-back: use Authorization header
                auth_header = cherrypy.request.headers.get('authorization')
                if auth_header is not None:
                    scheme, params = auth_header.split(' ', 1)
                    if scheme.lower() == 'bearer':
                        return params
            except (IndexError, ValueError):
                # A header without a space (e.g. just "Bearer") makes the
                # 2-tuple unpack raise ValueError; the original code only
                # caught IndexError, letting that error escape.
                return None
        return None

    @classmethod
    def set_user(cls, username):
        cls.LOCAL_USER.username = username

    @classmethod
    def reset_user(cls):
        cls.set_user(None)

    @classmethod
    def get_username(cls):
        return getattr(cls.LOCAL_USER, 'username', None)

    @classmethod
    def get_user(cls, token):
        """Return the User a valid, non-blocklisted token refers to, else None."""
        try:
            dtoken = JwtManager.decode_token(token)
            if not JwtManager.is_blocklisted(dtoken['jti']):
                user = AuthManager.get_user(dtoken['username'])
                # Reject tokens issued before the user record last changed
                # (password/roles updates invalidate older sessions).
                if user.last_update <= dtoken['iat']:
                    return user
                cls.logger.debug(  # type: ignore
                    "user info changed after token was issued, iat=%s last_update=%s",
                    dtoken['iat'], user.last_update
                )
            else:
                cls.logger.debug('Token is block-listed')  # type: ignore
        except jwt.ExpiredSignatureError:
            cls.logger.debug("Token has expired")  # type: ignore
        except jwt.InvalidTokenError:
            cls.logger.debug("Failed to decode token")  # type: ignore
        except UserDoesNotExist:
            cls.logger.debug(  # type: ignore
                "Invalid token: user %s does not exist", dtoken['username']
            )
        return None

    @classmethod
    def blocklist_token(cls, token):
        """Add ``token`` to the persistent block-list, pruning expired entries."""
        token = cls.decode_token(token)
        blocklist_json = mgr.get_store(cls.JWT_TOKEN_BLOCKLIST_KEY)
        if not blocklist_json:
            blocklist_json = "{}"
        bl_dict = json.loads(blocklist_json)
        now = time.time()
        # remove expired tokens
        to_delete = []
        for jti, exp in bl_dict.items():
            if exp < now:
                to_delete.append(jti)
        for jti in to_delete:
            del bl_dict[jti]
        bl_dict[token['jti']] = token['exp']
        mgr.set_store(cls.JWT_TOKEN_BLOCKLIST_KEY, json.dumps(bl_dict))

    @classmethod
    def is_blocklisted(cls, jti):
        """Return True if the token id ``jti`` is on the block-list."""
        blocklist_json = mgr.get_store(cls.JWT_TOKEN_BLOCKLIST_KEY)
        if not blocklist_json:
            blocklist_json = "{}"
        bl_dict = json.loads(blocklist_json)
        return jti in bl_dict
class AuthManager(object):
    """Thin facade over the configured authentication provider."""

    # The active provider; set by initialize().
    AUTH_PROVIDER = None

    @classmethod
    def initialize(cls):
        cls.AUTH_PROVIDER = LocalAuthenticator()

    @classmethod
    def get_user(cls, username):
        return cls.AUTH_PROVIDER.get_user(username)  # type: ignore

    @classmethod
    def authenticate(cls, username, password):
        # Returns the session info dict on success, None otherwise
        # (see LocalAuthenticator.authenticate).
        return cls.AUTH_PROVIDER.authenticate(username, password)  # type: ignore

    @classmethod
    def authorize(cls, username, scope, permissions):
        return cls.AUTH_PROVIDER.authorize(username, scope, permissions)  # type: ignore
class AuthManagerTool(cherrypy.Tool):
    """CherryPy tool that authenticates the JWT and authorizes each request."""

    def __init__(self):
        # Hook in before the request handler runs (priority 20).
        super(AuthManagerTool, self).__init__(
            'before_handler', self._check_authentication, priority=20)
        self.logger = logging.getLogger('auth')

    def _check_authentication(self):
        JwtManager.reset_user()
        token = JwtManager.get_token_from_header()
        if token:
            user = JwtManager.get_user(token)
            if user:
                self._check_authorization(user.username)
                return
        # No token, or an invalid/expired/block-listed one: reject.
        self.logger.debug('Unauthorized access to %s',
                          cherrypy.url(relative='server'))
        raise cherrypy.HTTPError(401, 'You are not authorized to access '
                                      'that resource')

    def _check_authorization(self, username):
        self.logger.debug("checking authorization...")
        handler = cherrypy.request.handler.callable
        controller = handler.__self__
        # Scope/permission attributes — presumably attached by the endpoint
        # decorators in the controllers module; confirm there.
        sec_scope = getattr(controller, '_security_scope', None)
        sec_perms = getattr(handler, '_security_permissions', None)
        JwtManager.set_user(username)
        if not sec_scope:
            # controller does not define any authorization restrictions
            return
        self.logger.debug("checking '%s' access to '%s' scope", sec_perms,
                          sec_scope)
        if not sec_perms:
            self.logger.debug("Fail to check permission on: %s:%s", controller,
                              handler)
            raise cherrypy.HTTPError(403, "You don't have permissions to "
                                          "access that resource")
        if not AuthManager.authorize(username, sec_scope, sec_perms):
            raise cherrypy.HTTPError(403, "You don't have permissions to "
                                          "access that resource")
| 7,172 | 32.362791 | 96 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/ceph_service.py
|
# -*- coding: utf-8 -*-
import json
import logging
import rados
from mgr_module import CommandResult
from mgr_util import get_most_recent_rate, get_time_series_rates, name_to_config_section
from .. import mgr
try:
from typing import Any, Dict, Optional, Union
except ImportError:
pass # For typing only
logger = logging.getLogger('ceph_service')
class SendCommandError(rados.Error):
    """Raised when a command dispatched to the cluster fails.

    Keeps the failed command's prefix and argument dict for diagnostics.
    """

    def __init__(self, err, prefix, argdict, errno):
        super(SendCommandError, self).__init__(err, errno)
        self.prefix = prefix
        self.argdict = argdict
# pylint: disable=too-many-public-methods
class CephService(object):
    """Helpers for querying cluster state via the mgr module interface."""

    # OSD flags controlling scrubbing behaviour.
    OSD_FLAG_NO_SCRUB = 'noscrub'
    OSD_FLAG_NO_DEEP_SCRUB = 'nodeep-scrub'
    # Scrub-related PG state keywords.
    PG_STATUS_SCRUBBING = 'scrubbing'
    PG_STATUS_DEEP_SCRUBBING = 'deep'
    # Human-readable scrub status values.
    SCRUB_STATUS_DISABLED = 'Disabled'
    SCRUB_STATUS_ACTIVE = 'Active'
    SCRUB_STATUS_INACTIVE = 'Inactive'
@classmethod
def get_service_map(cls, service_name):
service_map = {} # type: Dict[str, dict]
for server in mgr.list_servers():
for service in server['services']:
if service['type'] == service_name:
if server['hostname'] not in service_map:
service_map[server['hostname']] = {
'server': server,
'services': []
}
inst_id = service['id']
metadata = mgr.get_metadata(service_name, inst_id)
status = mgr.get_daemon_status(service_name, inst_id)
service_map[server['hostname']]['services'].append({
'id': inst_id,
'type': service_name,
'hostname': server['hostname'],
'metadata': metadata,
'status': status
})
return service_map
@classmethod
def get_service_list(cls, service_name):
service_map = cls.get_service_map(service_name)
return [svc for _, svcs in service_map.items() for svc in svcs['services']]
@classmethod
def get_service_data_by_metadata_id(cls,
service_type: str,
metadata_id: str) -> Optional[Dict[str, Any]]:
for server in mgr.list_servers():
for service in server['services']:
if service['type'] == service_type:
metadata = mgr.get_metadata(service_type, service['id'])
if metadata_id == metadata['id']:
return {
'id': metadata['id'],
'service_map_id': str(service['id']),
'type': service_type,
'hostname': server['hostname'],
'metadata': metadata
}
return None
@classmethod
def get_service(cls, service_type: str, metadata_id: str) -> Optional[Dict[str, Any]]:
svc_data = cls.get_service_data_by_metadata_id(service_type, metadata_id)
if svc_data:
svc_data['status'] = mgr.get_daemon_status(svc_data['type'], svc_data['service_map_id'])
return svc_data
    @classmethod
    def get_service_perf_counters(cls, service_type: str, service_id: str) -> Dict[str, Any]:
        """Return all perf counters of one daemon: rates for counters,
        latest values for gauges."""
        schema_dict = mgr.get_perf_schema(service_type, service_id)
        schema = schema_dict["{}.{}".format(service_type, service_id)]
        counters = []
        for key, value in sorted(schema.items()):
            counter = {'name': str(key), 'description': value['description']}
            # pylint: disable=W0212
            if mgr._stattype_to_str(value['type']) == 'counter':
                # Monotonic counters are reported as a rate, with a unit.
                counter['value'] = cls.get_rate(
                    service_type, service_id, key)
                counter['unit'] = mgr._unit_to_str(value['units'])
            else:
                # Gauges are reported as their most recent sampled value.
                counter['value'] = mgr.get_latest(
                    service_type, service_id, key)
                counter['unit'] = ''
            counters.append(counter)
        return {
            'service': {
                'type': service_type,
                'id': str(service_id)
            },
            'counters': counters
        }
@classmethod
def get_pool_list(cls, application=None):
osd_map = mgr.get('osd_map')
if not application:
return osd_map['pools']
return [pool for pool in osd_map['pools']
if application in pool.get('application_metadata', {})]
    @classmethod
    def get_pool_list_with_stats(cls, application=None):
        # pylint: disable=too-many-locals
        # Return pools (optionally filtered by application tag), each
        # enriched with its PG status summary and, per stat, the latest
        # sample, the most recent rate and the full rate series.
        pools = cls.get_pool_list(application)
        pools_w_stats = []
        pg_summary = mgr.get("pg_summary")
        pool_stats = mgr.get_updated_pool_stats()
        for pool in pools:
            # The PG summary is keyed by the pool id as a string.
            pool['pg_status'] = pg_summary['by_pool'][pool['pool'].__str__()]
            stats = pool_stats[pool['pool']]
            s = {}
            for stat_name, stat_series in stats.items():
                rates = get_time_series_rates(stat_series)
                s[stat_name] = {
                    'latest': stat_series[0][1],  # newest sample's value
                    'rate': get_most_recent_rate(rates),
                    'rates': rates
                }
            pool['stats'] = s
            pools_w_stats.append(pool)
        return pools_w_stats
@classmethod
def get_erasure_code_profiles(cls):
def _serialize_ecp(name, ecp):
def serialize_numbers(key):
value = ecp.get(key)
if value is not None:
ecp[key] = int(value)
ecp['name'] = name
serialize_numbers('k')
serialize_numbers('m')
return ecp
ret = []
for name, ecp in mgr.get('osd_map').get('erasure_code_profiles', {}).items():
ret.append(_serialize_ecp(name, ecp))
return ret
    @classmethod
    def get_pool_name_from_id(cls, pool_id):
        # type: (int) -> Union[str, None]
        # Resolve a numeric pool id to its name via librados.
        return mgr.rados.pool_reverse_lookup(pool_id)
@classmethod
def get_pool_by_attribute(cls, attribute, value):
# type: (str, Any) -> Union[dict, None]
pool_list = cls.get_pool_list()
for pool in pool_list:
if attribute in pool and pool[attribute] == value:
return pool
return None
@classmethod
def get_encryption_config(cls, daemon_name):
kms_vault_configured = False
s3_vault_configured = False
kms_backend: str = ''
sse_s3_backend: str = ''
vault_stats = []
full_daemon_name = 'rgw.' + daemon_name
kms_backend = CephService.send_command('mon', 'config get',
who=name_to_config_section(full_daemon_name),
key='rgw_crypt_s3_kms_backend')
sse_s3_backend = CephService.send_command('mon', 'config get',
who=name_to_config_section(full_daemon_name),
key='rgw_crypt_sse_s3_backend')
if kms_backend.strip() == 'vault':
kms_vault_auth: str = CephService.send_command('mon', 'config get',
who=name_to_config_section(full_daemon_name), # noqa E501 #pylint: disable=line-too-long
key='rgw_crypt_vault_auth')
kms_vault_engine: str = CephService.send_command('mon', 'config get',
who=name_to_config_section(full_daemon_name), # noqa E501 #pylint: disable=line-too-long
key='rgw_crypt_vault_secret_engine')
kms_vault_address: str = CephService.send_command('mon', 'config get',
who=name_to_config_section(full_daemon_name), # noqa E501 #pylint: disable=line-too-long
key='rgw_crypt_vault_addr')
kms_vault_token: str = CephService.send_command('mon', 'config get',
who=name_to_config_section(full_daemon_name), # noqa E501 #pylint: disable=line-too-long
key='rgw_crypt_vault_token_file') # noqa E501 #pylint: disable=line-too-long
if (kms_vault_auth.strip() != "" and kms_vault_engine.strip() != "" and kms_vault_address.strip() != ""): # noqa E501 #pylint: disable=line-too-long
if(kms_vault_auth == 'token' and kms_vault_token.strip() == ""):
kms_vault_configured = False
else:
kms_vault_configured = True
if sse_s3_backend.strip() == 'vault':
s3_vault_auth: str = CephService.send_command('mon', 'config get',
who=name_to_config_section(full_daemon_name), # noqa E501 #pylint: disable=line-too-long
key='rgw_crypt_sse_s3_vault_auth')
s3_vault_engine: str = CephService.send_command('mon',
'config get',
who=name_to_config_section(full_daemon_name), # noqa E501 #pylint: disable=line-too-long
key='rgw_crypt_sse_s3_vault_secret_engine') # noqa E501 #pylint: disable=line-too-long
s3_vault_address: str = CephService.send_command('mon', 'config get',
who=name_to_config_section(full_daemon_name), # noqa E501 #pylint: disable=line-too-long
key='rgw_crypt_sse_s3_vault_addr')
s3_vault_token: str = CephService.send_command('mon', 'config get',
who=name_to_config_section(full_daemon_name), # noqa E501 #pylint: disable=line-too-long
key='rgw_crypt_sse_s3_vault_token_file') # noqa E501 #pylint: disable=line-too-long
if (s3_vault_auth.strip() != "" and s3_vault_engine.strip() != "" and s3_vault_address.strip() != ""): # noqa E501 #pylint: disable=line-too-long
if(s3_vault_auth == 'token' and s3_vault_token.strip() == ""):
s3_vault_configured = False
else:
s3_vault_configured = True
vault_stats.append(kms_vault_configured)
vault_stats.append(s3_vault_configured)
return vault_stats
@classmethod
def set_encryption_config(cls, encryption_type, kms_provider, auth_method,
secret_engine, secret_path, namespace, address,
token, daemon_name, ssl_cert, client_cert, client_key):
full_daemon_name = 'rgw.' + daemon_name
if encryption_type == 'aws:kms':
KMS_CONFIG = [
['rgw_crypt_s3_kms_backend', kms_provider],
['rgw_crypt_vault_auth', auth_method],
['rgw_crypt_vault_prefix', secret_path],
['rgw_crypt_vault_namespace', namespace],
['rgw_crypt_vault_secret_engine', secret_engine],
['rgw_crypt_vault_addr', address],
['rgw_crypt_vault_token_file', token],
['rgw_crypt_vault_ssl_cacert', ssl_cert],
['rgw_crypt_vault_ssl_clientcert', client_cert],
['rgw_crypt_vault_ssl_clientkey', client_key]
]
for (key, value) in KMS_CONFIG:
if value == 'null':
continue
CephService.send_command('mon', 'config set',
who=name_to_config_section(full_daemon_name),
name=key, value=value)
if encryption_type == 'AES256':
SSE_S3_CONFIG = [
['rgw_crypt_sse_s3_backend', kms_provider],
['rgw_crypt_sse_s3_vault_auth', auth_method],
['rgw_crypt_sse_s3_vault_prefix', secret_path],
['rgw_crypt_sse_s3_vault_namespace', namespace],
['rgw_crypt_sse_s3_vault_secret_engine', secret_engine],
['rgw_crypt_sse_s3_vault_addr', address],
['rgw_crypt_sse_s3_vault_token_file', token],
['rgw_crypt_sse_s3_vault_ssl_cacert', ssl_cert],
['rgw_crypt_sse_s3_vault_ssl_clientcert', client_cert],
['rgw_crypt_sse_s3_vault_ssl_clientkey', client_key]
]
for (key, value) in SSE_S3_CONFIG:
if value == 'null':
continue
CephService.send_command('mon', 'config set',
who=name_to_config_section(full_daemon_name),
name=key, value=value)
return {}
@classmethod
def set_multisite_config(cls, realm_name, zonegroup_name, zone_name, daemon_name):
full_daemon_name = 'rgw.' + daemon_name
KMS_CONFIG = [
['rgw_realm', realm_name],
['rgw_zonegroup', zonegroup_name],
['rgw_zone', zone_name]
]
for (key, value) in KMS_CONFIG:
if value == 'null':
continue
CephService.send_command('mon', 'config set',
who=name_to_config_section(full_daemon_name),
name=key, value=value)
return {}
@classmethod
def get_realm_tokens(cls):
tokens_info = mgr.remote('rgw', 'get_realm_tokens')
return tokens_info
@classmethod
def import_realm_token(cls, realm_token, zone_name):
tokens_info = mgr.remote('rgw', 'import_realm_token', zone_name=zone_name,
realm_token=realm_token, start_radosgw=True)
return tokens_info
@classmethod
def get_pool_pg_status(cls, pool_name):
# type: (str) -> dict
pool = cls.get_pool_by_attribute('pool_name', pool_name)
if pool is None:
return {}
return mgr.get("pg_summary")['by_pool'][pool['pool'].__str__()]
    @staticmethod
    def send_command(srv_type, prefix, srv_spec='', to_json=True, inbuf='', **kwargs):
        # type: (str, str, Optional[str], bool, str, Any) -> Any
        """
        Send a synchronous command to a Ceph daemon and wait for the result.

        :param srv_type: The service to address, e.g. 'mon' or 'osd'.
        :param prefix: The command prefix, e.g. 'osd tree'.
        :param srv_spec: Typically empty, or something like "<fs_id>:0".
        :param to_json: If True, request a JSON-formatted reply and parse it.
        :param inbuf: Optional input buffer sent along with the command.
        :param kwargs: Merged into the command's argument dict; entries whose
            value is None are dropped.
        :raises PermissionError: See rados.make_ex
        :raises ObjectNotFound: See rados.make_ex
        :raises IOError: See rados.make_ex
        :raises NoSpace: See rados.make_ex
        :raises ObjectExists: See rados.make_ex
        :raises ObjectBusy: See rados.make_ex
        :raises NoData: See rados.make_ex
        :raises InterruptedOrTimeoutError: See rados.make_ex
        :raises TimedOut: See rados.make_ex
        :raises SendCommandError: If the command's return code is non-zero.
        """
        argdict = {
            "prefix": prefix,
        }
        if to_json:
            argdict["format"] = "json"
        argdict.update({k: v for k, v in kwargs.items() if v is not None})
        result = CommandResult("")
        mgr.send_command(result, srv_type, srv_spec, json.dumps(argdict), "", inbuf=inbuf)
        r, outb, outs = result.wait()
        if r != 0:
            logger.error("send_command '%s' failed. (r=%s, outs=\"%s\", kwargs=%s)", prefix, r,
                         outs, kwargs)
            raise SendCommandError(outs, prefix, argdict, r)
        try:
            # The reply may not be valid JSON (or to_json may be False);
            # fall back to returning the raw output buffer.
            return json.loads(outb or outs)
        except Exception:  # pylint: disable=broad-except
            return outb
    @staticmethod
    def _get_smart_data_by_device(device):
        # type: (dict) -> Dict[str, dict]
        """
        Fetch SMART data for a single device via one of its daemons.

        OSD daemons are only queried when they are 'up'; MON daemons are
        queried via 'device query-daemon-health-metrics'. The first attempt
        that does not raise wins (the loop breaks after it).

        :param device: A device dict containing 'devid' and, optionally, a
            'daemons' list of '<svc_type>.<svc_id>' names.
        :return: A dict mapping device ID to SMART data, or {} when no data
            could be retrieved.
        """
        # Check whether the device is associated with daemons.
        if 'daemons' in device and device['daemons']:
            dev_smart_data: Dict[str, Any] = {}
            # Get a list of all OSD daemons on all hosts that are 'up'
            # because SMART data can not be retrieved from daemons that
            # are 'down' or 'destroyed'.
            osd_tree = CephService.send_command('mon', 'osd tree')
            osd_daemons_up = [
                node['name'] for node in osd_tree.get('nodes', {})
                if node.get('status') == 'up'
            ]
            # All daemons on the same host can deliver SMART data,
            # thus it is not relevant for us which daemon we are using.
            # NOTE: the list may contain daemons that are 'down' or 'destroyed'.
            for daemon in device['daemons']:
                svc_type, svc_id = daemon.split('.', 1)
                if 'osd' in svc_type:
                    if daemon not in osd_daemons_up:
                        continue
                    try:
                        dev_smart_data = CephService.send_command(
                            svc_type, 'smart', svc_id, devid=device['devid'])
                    except SendCommandError as error:
                        logger.warning(str(error))
                        # Try to retrieve SMART data from another daemon.
                        continue
                elif 'mon' in svc_type:
                    try:
                        dev_smart_data = CephService.send_command(
                            svc_type, 'device query-daemon-health-metrics', who=daemon)
                    except SendCommandError as error:
                        logger.warning(str(error))
                        # Try to retrieve SMART data from another daemon.
                        continue
                else:
                    dev_smart_data = {}
                CephService.log_dev_data_error(dev_smart_data)
                # First attempt that did not 'continue' ends the search.
                break
            return dev_smart_data
        logger.warning('[SMART] No daemons associated with device ID "%s"',
                       device['devid'])
        return {}
@staticmethod
def log_dev_data_error(dev_smart_data):
for dev_id, dev_data in dev_smart_data.items():
if 'error' in dev_data:
logger.warning(
'[SMART] Error retrieving smartctl data for device ID "%s": %s',
dev_id, dev_data)
@staticmethod
def get_devices_by_host(hostname):
# type: (str) -> dict
return CephService.send_command('mon',
'device ls-by-host',
host=hostname)
@staticmethod
def get_devices_by_daemon(daemon_type, daemon_id):
# type: (str, str) -> dict
return CephService.send_command('mon',
'device ls-by-daemon',
who='{}.{}'.format(
daemon_type, daemon_id))
@staticmethod
def get_smart_data_by_host(hostname):
# type: (str) -> dict
"""
Get the SMART data of all devices on the given host, regardless
of the daemon (osd, mon, ...).
:param hostname: The name of the host.
:return: A dictionary containing the SMART data of every device
on the given host. The device name is used as the key in the
dictionary.
"""
devices = CephService.get_devices_by_host(hostname)
smart_data = {} # type: dict
if devices:
for device in devices:
if device['devid'] not in smart_data:
smart_data.update(
CephService._get_smart_data_by_device(device))
else:
logger.debug('[SMART] could not retrieve device list from host %s', hostname)
return smart_data
@staticmethod
def get_smart_data_by_daemon(daemon_type, daemon_id):
# type: (str, str) -> Dict[str, dict]
"""
Get the SMART data of the devices associated with the given daemon.
:param daemon_type: The daemon type, e.g. 'osd' or 'mon'.
:param daemon_id: The daemon identifier.
:return: A dictionary containing the SMART data of every device
associated with the given daemon. The device name is used as the
key in the dictionary.
"""
devices = CephService.get_devices_by_daemon(daemon_type, daemon_id)
smart_data = {} # type: Dict[str, dict]
if devices:
for device in devices:
if device['devid'] not in smart_data:
smart_data.update(
CephService._get_smart_data_by_device(device))
else:
msg = '[SMART] could not retrieve device list from daemon with type %s and ' +\
'with ID %s'
logger.debug(msg, daemon_type, daemon_id)
return smart_data
@classmethod
def get_rates(cls, svc_type, svc_name, path):
"""
:return: the derivative of mgr.get_counter()
:rtype: list[tuple[int, float]]"""
data = mgr.get_counter(svc_type, svc_name, path)[path]
return get_time_series_rates(data)
@classmethod
def get_rate(cls, svc_type, svc_name, path):
"""returns most recent rate"""
return get_most_recent_rate(cls.get_rates(svc_type, svc_name, path))
@classmethod
def get_client_perf(cls):
pools_stats = mgr.get('osd_pool_stats')['pool_stats']
io_stats = {
'read_bytes_sec': 0,
'read_op_per_sec': 0,
'write_bytes_sec': 0,
'write_op_per_sec': 0,
}
recovery_stats = {'recovering_bytes_per_sec': 0}
for pool_stats in pools_stats:
client_io = pool_stats['client_io_rate']
for stat in list(io_stats.keys()):
if stat in client_io:
io_stats[stat] += client_io[stat]
client_recovery = pool_stats['recovery_rate']
for stat in list(recovery_stats.keys()):
if stat in client_recovery:
recovery_stats[stat] += client_recovery[stat]
client_perf = io_stats.copy()
client_perf.update(recovery_stats)
return client_perf
@classmethod
def get_scrub_status(cls):
enabled_flags = mgr.get('osd_map')['flags_set']
if cls.OSD_FLAG_NO_SCRUB in enabled_flags or cls.OSD_FLAG_NO_DEEP_SCRUB in enabled_flags:
return cls.SCRUB_STATUS_DISABLED
grouped_pg_statuses = mgr.get('pg_summary')['all']
for grouped_pg_status in grouped_pg_statuses.keys():
if len(grouped_pg_status.split(cls.PG_STATUS_SCRUBBING)) > 1 \
or len(grouped_pg_status.split(cls.PG_STATUS_DEEP_SCRUBBING)) > 1:
return cls.SCRUB_STATUS_ACTIVE
return cls.SCRUB_STATUS_INACTIVE
@classmethod
def get_pg_info(cls):
pg_summary = mgr.get('pg_summary')
object_stats = {stat: pg_summary['pg_stats_sum']['stat_sum'][stat] for stat in [
'num_objects', 'num_object_copies', 'num_objects_degraded',
'num_objects_misplaced', 'num_objects_unfound']}
pgs_per_osd = 0.0
total_osds = len(pg_summary['by_osd'])
if total_osds > 0:
total_pgs = 0.0
for _, osd_pg_statuses in pg_summary['by_osd'].items():
for _, pg_amount in osd_pg_statuses.items():
total_pgs += pg_amount
pgs_per_osd = total_pgs / total_osds
return {
'object_stats': object_stats,
'statuses': pg_summary['all'],
'pgs_per_osd': pgs_per_osd,
}
| 24,245 | 41.462347 | 161 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/cephfs.py
|
# -*- coding: utf-8 -*-
import datetime
import logging
import os
from contextlib import contextmanager
import cephfs
from .. import mgr
logger = logging.getLogger('cephfs')
class CephFS(object):
    """Thin convenience wrapper around a mounted libcephfs connection.

    Provides directory listing, directory/snapshot management and quota
    handling for the dashboard's CephFS views.
    """
    @classmethod
    def list_filesystems(cls):
        """Return [{'id': ..., 'name': ...}] for every filesystem in the FSMap."""
        fsmap = mgr.get("fs_map")
        return [{'id': fs['id'], 'name': fs['mdsmap']['fs_name']}
                for fs in fsmap['filesystems']]
    @classmethod
    def fs_name_from_id(cls, fs_id):
        """
        Get the filesystem name from ID.
        :param fs_id: The filesystem ID.
        :type fs_id: int | str
        :return: The filesystem name or None.
        :rtype: str | None
        """
        fs_map = mgr.get("fs_map")
        fs_info = list(filter(lambda x: str(x['id']) == str(fs_id),
                              fs_map['filesystems']))
        if not fs_info:
            return None
        return fs_info[0]['mdsmap']['fs_name']
    def __init__(self, fs_name=None):
        """Mount the (optionally named) filesystem using the mgr's RADOS handle."""
        logger.debug("initializing cephfs connection")
        self.cfs = cephfs.LibCephFS(rados_inst=mgr.rados)
        logger.debug("mounting cephfs filesystem: %s", fs_name)
        if fs_name:
            self.cfs.mount(filesystem_name=fs_name)
        else:
            self.cfs.mount()
        logger.debug("mounted cephfs filesystem")
    def __del__(self):
        # Best-effort cleanup of the libcephfs handle on garbage collection.
        logger.debug("shutting down cephfs filesystem")
        self.cfs.shutdown()
    @contextmanager
    def opendir(self, dirpath):
        """Context manager yielding an open directory handle; always closes it."""
        d = None
        try:
            d = self.cfs.opendir(dirpath)
            yield d
        finally:
            if d:
                self.cfs.closedir(d)
    def ls_dir(self, path, depth):
        """
        List directories of specified path with additional information.
        :param path: The root directory path.
        :type path: str | bytes
        :param depth: The number of steps to go down the directory tree.
        :type depth: int | str
        :return: A list of directory dicts which consist of name, path,
            parent, snapshots and quotas.
        :rtype: list
        """
        paths = self._ls_dir(path, int(depth))
        # Convert (bytes => string), prettify paths (strip slashes)
        # and append additional information.
        return [self.get_directory(p) for p in paths if p != path.encode()]
    def _ls_dir(self, path, depth):
        """
        List directories of specified path.
        :param path: The root directory path.
        :type path: str | bytes
        :param depth: The number of steps to go down the directory tree.
        :type depth: int
        :return: A list of directory paths (bytes encoded).
        Example:
            ls_dir('/photos', 1) => [
                b'/photos/flowers', b'/photos/cars'
            ]
        :rtype: list
        """
        if isinstance(path, str):
            path = path.encode()
        logger.debug("get_dir_list dirpath=%s depth=%s", path,
                     depth)
        if depth == 0:
            return [path]
        logger.debug("opening dirpath=%s", path)
        with self.opendir(path) as d:
            dent = self.cfs.readdir(d)
            paths = [path]
            while dent:
                logger.debug("found entry=%s", dent.d_name)
                if dent.d_name in [b'.', b'..']:
                    dent = self.cfs.readdir(d)
                    continue
                if dent.is_dir():
                    logger.debug("found dir=%s", dent.d_name)
                    subdir_path = os.path.join(path, dent.d_name)
                    # Recurse with one level less of remaining depth.
                    paths.extend(self._ls_dir(subdir_path, depth - 1))
                dent = self.cfs.readdir(d)
        return paths
    def get_directory(self, path):
        """
        Transforms path of directory into a meaningful dictionary.
        :param path: The root directory path.
        :type path: str | bytes
        :return: Dict consists of name, path, parent, snapshots and quotas.
        :rtype: dict
        """
        path = path.decode()
        not_root = path != os.sep
        return {
            'name': os.path.basename(path) if not_root else path,
            'path': path,
            'parent': os.path.dirname(path) if not_root else None,
            'snapshots': self.ls_snapshots(path),
            'quotas': self.get_quotas(path) if not_root else None
        }
    def dir_exists(self, path):
        """Return True if ``path`` can be opened as a directory."""
        try:
            with self.opendir(path):
                return True
        except cephfs.ObjectNotFound:
            return False
    def mk_dirs(self, path):
        """
        Create a directory (and missing parents). No-op if it already exists.
        :param path: The path of the directory.
        """
        if path == os.sep:
            raise Exception('Cannot create root directory "/"')
        if self.dir_exists(path):
            return
        logger.info("Creating directory: %s", path)
        self.cfs.mkdirs(path, 0o755)
    def rm_dir(self, path):
        """
        Remove a directory. No-op if it does not exist.
        :param path: The path of the directory.
        """
        if path == os.sep:
            raise Exception('Cannot remove root directory "/"')
        if not self.dir_exists(path):
            return
        logger.info("Removing directory: %s", path)
        self.cfs.rmdir(path)
    def mk_snapshot(self, path, name=None, mode=0o755):
        """
        Create a snapshot.
        :param path: The path of the directory.
        :type path: str
        :param name: The name of the snapshot. If not specified, a name
            based on the current time (ISO 8601, local timezone) is
            generated.
        :type name: str | None
        :param mode: The permissions the directory should have
            once created.
        :type mode: int
        :return: Returns the name of the snapshot.
        :rtype: str
        """
        if name is None:
            now = datetime.datetime.now()
            tz = now.astimezone().tzinfo
            name = now.replace(tzinfo=tz).isoformat('T')
        client_snapdir = self.cfs.conf_get('client_snapdir')
        snapshot_path = os.path.join(path, client_snapdir, name)
        logger.info("Creating snapshot: %s", snapshot_path)
        self.cfs.mkdir(snapshot_path, mode)
        return name
    def ls_snapshots(self, path):
        """
        List snapshots for the specified path.
        :param path: The path of the directory.
        :type path: str
        :return: A list of dictionaries containing the name and the
            creation time of the snapshot.
        :rtype: list
        """
        result = []
        client_snapdir = self.cfs.conf_get('client_snapdir')
        path = os.path.join(path, client_snapdir).encode()
        with self.opendir(path) as d:
            dent = self.cfs.readdir(d)
            while dent:
                if dent.is_dir():
                    # Skip '.', '..' and entries starting with '_'.
                    if dent.d_name not in [b'.', b'..'] and not dent.d_name.startswith(b'_'):
                        snapshot_path = os.path.join(path, dent.d_name)
                        stat = self.cfs.stat(snapshot_path)
                        result.append({
                            'name': dent.d_name.decode(),
                            'path': snapshot_path.decode(),
                            'created': '{}Z'.format(stat.st_ctime.isoformat('T'))
                        })
                dent = self.cfs.readdir(d)
        return result
    def rm_snapshot(self, path, name):
        """
        Remove a snapshot.
        :param path: The path of the directory.
        :type path: str
        :param name: The name of the snapshot.
        :type name: str
        """
        client_snapdir = self.cfs.conf_get('client_snapdir')
        snapshot_path = os.path.join(path, client_snapdir, name)
        logger.info("Removing snapshot: %s", snapshot_path)
        self.cfs.rmdir(snapshot_path)
    def get_quotas(self, path):
        """
        Get the quotas of the specified path.
        :param path: The path of the directory/file.
        :type path: str
        :return: Returns a dictionary containing 'max_bytes'
            and 'max_files'.
        :rtype: dict
        """
        try:
            max_bytes = int(self.cfs.getxattr(path, 'ceph.quota.max_bytes'))
        except cephfs.NoData:
            # Unset quota xattrs are reported as 0 (no limit).
            max_bytes = 0
        try:
            max_files = int(self.cfs.getxattr(path, 'ceph.quota.max_files'))
        except cephfs.NoData:
            max_files = 0
        return {'max_bytes': max_bytes, 'max_files': max_files}
    def set_quotas(self, path, max_bytes=None, max_files=None):
        """
        Set the quotas of the specified path. None values are left unchanged.
        :param path: The path of the directory/file.
        :type path: str
        :param max_bytes: The byte limit.
        :type max_bytes: int | None
        :param max_files: The file limit.
        :type max_files: int | None
        """
        if max_bytes is not None:
            self.cfs.setxattr(path, 'ceph.quota.max_bytes',
                              str(max_bytes).encode(), 0)
        if max_files is not None:
            self.cfs.setxattr(path, 'ceph.quota.max_files',
                              str(max_files).encode(), 0)
| 9,108 | 33.634981 | 93 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/cluster.py
|
# -*- coding: utf-8 -*-
from enum import Enum
from typing import NamedTuple
from .. import mgr
class ClusterCapacity(NamedTuple):
    # Raw byte counters taken from the cluster's 'df' report.
    total_avail_bytes: int
    total_bytes: int
    total_used_raw_bytes: int
class ClusterModel:
    """Tracks whether the cluster is freshly installed or fully set up."""

    class Status(Enum):
        INSTALLED = 0
        POST_INSTALLED = 1

    # Current installation status of the cluster.
    status: Status

    def __init__(self, status=Status.POST_INSTALLED.name):
        """
        :param status: The status of the cluster. Assume that the cluster
            is already functional by default.
        :type status: str
        """
        self.status = self.Status[status]

    def dict(self):
        """Return a JSON-serializable representation of the model."""
        return {'status': self.status.name}

    def to_db(self):
        """Persist the current status in the mgr key/value store."""
        mgr.set_store('cluster/status', self.status.name)

    @classmethod
    def from_db(cls):
        """
        Get the stored cluster status from the configuration key/value store.
        If the status is not set, assume it is already fully functional.
        """
        return cls(status=mgr.get_store('cluster/status', cls.Status.POST_INSTALLED.name))

    @classmethod
    def get_capacity(cls) -> dict:
        """Return the cluster's capacity counters from the 'df' report.

        BUG FIX: this was annotated '-> ClusterCapacity' although it returns
        '._asdict()', i.e. a plain dict. The annotation now matches the
        actual return value; runtime behavior is unchanged.
        """
        df = mgr.get('df')
        return ClusterCapacity(total_avail_bytes=df['stats']['total_avail_bytes'],
                               total_bytes=df['stats']['total_bytes'],
                               total_used_raw_bytes=df['stats']['total_used_raw_bytes'])._asdict()
| 1,394 | 26.9 | 98 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/exception.py
|
# -*- coding: utf-8 -*-
import json
import logging
from contextlib import contextmanager
import cephfs
import cherrypy
import rados
import rbd
from orchestrator import OrchestratorError
from ..exceptions import DashboardException, ViewCacheNoDataException
from ..rest_client import RequestException
from ..services.ceph_service import SendCommandError
logger = logging.getLogger('exception')
def serialize_dashboard_exception(e, include_http_status=False, task=None):
    """
    Build a JSON-serializable dict describing an exception.

    :type e: Exception
    :param include_http_status: Used for Tasks, where the HTTP status code is
        not available.
    :param task: Optional task whose name/metadata are attached to the result.
    """
    from ..tools import ViewCache  # deferred import, as in the original module layout
    if isinstance(e, ViewCacheNoDataException):
        return {'status': ViewCache.VALUE_NONE, 'value': None}
    out = {'detail': str(e)}
    if hasattr(e, 'code'):
        out['code'] = e.code
    component = getattr(e, 'component', None)
    out['component'] = component or None
    if include_http_status:
        out['status'] = getattr(e, 'status', 500)  # type: ignore
    if task:
        out['task'] = {'name': task.name, 'metadata': task.metadata}  # type: ignore
    return out
# pylint: disable=broad-except
def dashboard_exception_handler(handler, *args, **kwargs):
    """
    Invoke ``handler`` and translate dashboard exceptions into JSON error
    responses.

    CherryPy control-flow exceptions (redirect/404/HTTPError) are re-raised
    untouched. ViewCacheNoDataException and DashboardException are serialized
    into a JSON body with the exception's status (default 400). Any other
    exception is logged and re-raised.
    """
    try:
        with handle_rados_error(component=None):  # make the None controller the fallback.
            return handler(*args, **kwargs)
    # pylint: disable=try-except-raise
    except (cherrypy.HTTPRedirect, cherrypy.NotFound, cherrypy.HTTPError):
        raise
    except (ViewCacheNoDataException, DashboardException) as error:
        logger.exception('Dashboard Exception')
        cherrypy.response.headers['Content-Type'] = 'application/json'
        cherrypy.response.status = getattr(error, 'status', 400)
        return json.dumps(serialize_dashboard_exception(error)).encode('utf-8')
    except Exception as error:
        logger.exception('Internal Server Error')
        raise error
@contextmanager
def handle_cephfs_error():
    """Translate cephfs.OSError into DashboardException (component 'cephfs')."""
    try:
        yield
    except cephfs.OSError as e:
        raise DashboardException(e, component='cephfs') from e
@contextmanager
def handle_rbd_error():
    """Translate rbd errors into DashboardException (component 'rbd')."""
    try:
        yield
    except rbd.OSError as e:
        # 'raise ... from e' preserves the original exception as __cause__,
        # consistent with handle_cephfs_error().
        raise DashboardException(e, component='rbd') from e
    except rbd.Error as e:
        raise DashboardException(e, component='rbd', code=e.__class__.__name__) from e
@contextmanager
def handle_rados_error(component):
    """Translate rados errors into DashboardException for ``component``."""
    try:
        yield
    except rados.OSError as e:
        # 'raise ... from e' preserves the original exception as __cause__,
        # consistent with handle_cephfs_error().
        raise DashboardException(e, component=component) from e
    except rados.Error as e:
        raise DashboardException(e, component=component, code=e.__class__.__name__) from e
@contextmanager
def handle_send_command_error(component):
    """Translate SendCommandError into DashboardException for ``component``."""
    try:
        yield
    except SendCommandError as e:
        # Chain the original exception (consistent with handle_cephfs_error).
        raise DashboardException(e, component=component) from e
@contextmanager
def handle_orchestrator_error(component):
    """Translate OrchestratorError into DashboardException for ``component``."""
    try:
        yield
    except OrchestratorError as e:
        # Chain the original exception (consistent with handle_cephfs_error).
        raise DashboardException(e, component=component) from e
@contextmanager
def handle_request_error(component):
    """Translate RequestException into DashboardException, preferring the
    'message' embedded in the response body (JSON) when present."""
    try:
        yield
    except RequestException as e:
        if e.content:
            content = json.loads(e.content)
            content_message = content.get('message')
            if content_message:
                raise DashboardException(
                    msg=content_message, component=component) from e
        # Chain the original exception (consistent with handle_cephfs_error).
        raise DashboardException(e=e, component=component) from e
@contextmanager
def handle_error(component, http_status_code=None):
    """Translate any exception into DashboardException with the given
    component and optional HTTP status code."""
    try:
        yield
    except Exception as e:  # pylint: disable=broad-except
        # Chain the original exception (consistent with handle_cephfs_error).
        raise DashboardException(e, component=component, http_status_code=http_status_code) from e
@contextmanager
def handle_custom_error(component, http_status_code=None, exceptions=()):
    """Translate only the given exception types into DashboardException."""
    try:
        yield
    except exceptions as e:
        # Chain the original exception (consistent with handle_cephfs_error).
        raise DashboardException(e, component=component, http_status_code=http_status_code) from e
| 3,912 | 28.421053 | 92 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/iscsi_cli.py
|
# -*- coding: utf-8 -*-
import errno
import json
from typing import Optional
from mgr_module import CLICheckNonemptyFileInput, CLIReadCommand, CLIWriteCommand
from ..rest_client import RequestException
from .iscsi_client import IscsiClient
from .iscsi_config import InvalidServiceUrl, IscsiGatewayAlreadyExists, \
IscsiGatewayDoesNotExist, IscsiGatewaysConfig, \
ManagedByOrchestratorException
@CLIReadCommand('dashboard iscsi-gateway-list')
def list_iscsi_gateways(_):
    '''
    List iSCSI gateways
    '''
    # The docstring above is user-visible CLI help text; do not reword it.
    config = IscsiGatewaysConfig.get_gateways_config()
    return 0, json.dumps(config), ''
@CLIWriteCommand('dashboard iscsi-gateway-add')
@CLICheckNonemptyFileInput(desc='iSCSI gateway configuration')
def add_iscsi_gateway(_, inbuf, name: Optional[str] = None):
    '''
    Add iSCSI gateway configuration. Gateway URL read from -i <file>
    '''
    # The docstring above is user-visible CLI help text; do not reword it.
    service_url = inbuf
    try:
        IscsiGatewaysConfig.validate_service_url(service_url)
        if name is None:
            # Default the gateway name to the hostname the gateway reports.
            name = IscsiClient.instance(service_url=service_url).get_hostname()['data']
        IscsiGatewaysConfig.add_gateway(name, service_url)
        return 0, 'Success', ''
    except IscsiGatewayAlreadyExists as ex:
        return -errno.EEXIST, '', str(ex)
    except (InvalidServiceUrl, ManagedByOrchestratorException, RequestException) as ex:
        # These three previously had identical, separate handlers; they all
        # map to EINVAL, so handle them in a single clause.
        return -errno.EINVAL, '', str(ex)
@CLIWriteCommand('dashboard iscsi-gateway-rm')
def remove_iscsi_gateway(_, name: str):
    '''
    Remove iSCSI gateway configuration
    '''
    # The docstring above is user-visible CLI help text; do not reword it.
    try:
        IscsiGatewaysConfig.remove_gateway(name)
    except IscsiGatewayDoesNotExist as ex:
        return -errno.ENOENT, '', str(ex)
    except ManagedByOrchestratorException as ex:
        return -errno.EINVAL, '', str(ex)
    return 0, 'Success', ''
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/iscsi_client.py
|
# -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods
import json
import logging
from requests.auth import HTTPBasicAuth
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from ..rest_client import RestClient
from ..settings import Settings
from .iscsi_config import IscsiGatewaysConfig
logger = logging.getLogger('iscsi_client')
class IscsiClient(RestClient):
    _CLIENT_NAME = 'iscsi'
    # Cache of client instances, keyed by gateway name (see instance()).
    _instances = {}  # type: dict
    # Set per instance by instance(); identify the gateway endpoint.
    service_url = None
    gateway_name = None
    @classmethod
    def instance(cls, gateway_name=None, service_url=None):
        """
        Return a (possibly cached) client for the given gateway.

        If no service URL is given it is looked up from the gateway config;
        if no gateway name is given, the first configured gateway is used.
        A cached instance is replaced when the service URL or the SSL
        verification setting has changed since it was created.
        """
        if not service_url:
            if not gateway_name:
                gateway_name = list(IscsiGatewaysConfig.get_gateways_config()['gateways'].keys())[0]
            gateways_config = IscsiGatewaysConfig.get_gateway_config(gateway_name)
            service_url = gateways_config['service_url']
        instance = cls._instances.get(gateway_name)
        if not instance or service_url != instance.service_url or \
                instance.session.verify != Settings.ISCSI_API_SSL_VERIFICATION:
            # Derive connection parameters from the service URL; credentials
            # are embedded in the URL (basic auth).
            url = urlparse(service_url)
            ssl = url.scheme == 'https'
            host = url.hostname
            port = url.port
            username = url.username
            password = url.password
            if not port:
                port = 443 if ssl else 80
            auth = HTTPBasicAuth(username, password)
            instance = IscsiClient(host, port, IscsiClient._CLIENT_NAME, ssl,
                                   auth, Settings.ISCSI_API_SSL_VERIFICATION)
            instance.service_url = service_url
            instance.gateway_name = gateway_name
            if gateway_name:
                cls._instances[gateway_name] = instance
        return instance
    @RestClient.api_get('/api/_ping')
    def ping(self, request=None):
        """Liveness check against the iSCSI gateway REST API."""
        return request()
    @RestClient.api_get('/api/settings')
    def get_settings(self, request=None):
        """Fetch the gateway's settings document."""
        return request()
    @RestClient.api_get('/api/sysinfo/ip_addresses')
    def get_ip_addresses(self, request=None):
        """Fetch the IP addresses known to the gateway host."""
        return request()
    @RestClient.api_get('/api/sysinfo/hostname')
    def get_hostname(self, request=None):
        """Fetch the gateway host's hostname."""
        return request()
    @RestClient.api_get('/api/config')
    def get_config(self, request=None):
        """Fetch the full gateway configuration with passwords decrypted."""
        return request({
            'decrypt_passwords': True
        })
    @RestClient.api_put('/api/target/{target_iqn}')
    def create_target(self, target_iqn, target_controls, request=None):
        """Create an iSCSI target with the given control settings."""
        logger.debug("[%s] Creating target: %s", self.gateway_name, target_iqn)
        return request({
            'controls': json.dumps(target_controls)
        })
    @RestClient.api_delete('/api/target/{target_iqn}')
    def delete_target(self, target_iqn, request=None):
        """Delete an iSCSI target."""
        logger.debug("[%s] Deleting target: %s", self.gateway_name, target_iqn)
        return request()
    @RestClient.api_put('/api/target/{target_iqn}')
    def reconfigure_target(self, target_iqn, target_controls, request=None):
        """Update the control settings of an existing target."""
        logger.debug("[%s] Reconfiguring target: %s", self.gateway_name, target_iqn)
        return request({
            'mode': 'reconfigure',
            'controls': json.dumps(target_controls)
        })
    @RestClient.api_put('/api/gateway/{target_iqn}/{gateway_name}')
    def create_gateway(self, target_iqn, gateway_name, ip_address, request=None):
        """Register a gateway (with its portal IPs) for a target."""
        logger.debug("[%s] Creating gateway: %s/%s", self.gateway_name, target_iqn,
                     gateway_name)
        return request({
            'ip_address': ','.join(ip_address),
            'skipchecks': 'true'
        })
    @RestClient.api_get('/api/gatewayinfo')
    def get_gatewayinfo(self, request=None):
        """Fetch runtime information about this gateway."""
        return request()
    @RestClient.api_delete('/api/gateway/{target_iqn}/{gateway_name}')
    def delete_gateway(self, target_iqn, gateway_name, request=None):
        """Remove a gateway from a target."""
        # NOTE(review): unlike the sibling log calls, this one lacks the
        # "[<gateway>]" prefix — probably an oversight; left unchanged here.
        logger.debug("Deleting gateway: %s/%s", target_iqn, gateway_name)
        return request()
    @RestClient.api_put('/api/disk/{pool}/{image}')
    def create_disk(self, pool, image, backstore, wwn, request=None):
        """Create a disk for the given pool/image with backstore and WWN."""
        logger.debug("[%s] Creating disk: %s/%s", self.gateway_name, pool, image)
        return request({
            'mode': 'create',
            'backstore': backstore,
            'wwn': wwn
        })
    @RestClient.api_delete('/api/disk/{pool}/{image}')
    def delete_disk(self, pool, image, request=None):
        """Remove a disk from the gateway config; the backing image is
        preserved (preserve_image=true)."""
        logger.debug("[%s] Deleting disk: %s/%s", self.gateway_name, pool, image)
        return request({
            'preserve_image': 'true'
        })
    @RestClient.api_put('/api/disk/{pool}/{image}')
    def reconfigure_disk(self, pool, image, controls, request=None):
        """Update the control settings of an existing disk."""
        logger.debug("[%s] Reconfiguring disk: %s/%s", self.gateway_name, pool, image)
        return request({
            'controls': json.dumps(controls),
            'mode': 'reconfigure'
        })
    @RestClient.api_put('/api/targetlun/{target_iqn}')
    def create_target_lun(self, target_iqn, image_id, lun, request=None):
        """Attach a disk to a target as the given LUN id."""
        logger.debug("[%s] Creating target lun: %s/%s", self.gateway_name, target_iqn,
                     image_id)
        return request({
            'disk': image_id,
            'lun_id': lun
        })
    @RestClient.api_delete('/api/targetlun/{target_iqn}')
    def delete_target_lun(self, target_iqn, image_id, request=None):
        """Detach a disk (LUN) from a target."""
        logger.debug("[%s] Deleting target lun: %s/%s", self.gateway_name, target_iqn,
                     image_id)
        return request({
            'disk': image_id
        })
    @RestClient.api_put('/api/client/{target_iqn}/{client_iqn}')
    def create_client(self, target_iqn, client_iqn, request=None):
        """Create an initiator (client) entry on a target."""
        logger.debug("[%s] Creating client: %s/%s", self.gateway_name, target_iqn, client_iqn)
        return request()
    @RestClient.api_delete('/api/client/{target_iqn}/{client_iqn}')
    def delete_client(self, target_iqn, client_iqn, request=None):
        """Delete an initiator (client) entry from a target."""
        logger.debug("[%s] Deleting client: %s/%s", self.gateway_name, target_iqn, client_iqn)
        return request()
    @RestClient.api_put('/api/clientlun/{target_iqn}/{client_iqn}')
    def create_client_lun(self, target_iqn, client_iqn, image_id, request=None):
        """Map a disk to a client."""
        logger.debug("[%s] Creating client lun: %s/%s", self.gateway_name, target_iqn,
                     client_iqn)
        return request({
            'disk': image_id
        })
    @RestClient.api_delete('/api/clientlun/{target_iqn}/{client_iqn}')
    def delete_client_lun(self, target_iqn, client_iqn, image_id, request=None):
        """Unmap a disk from a client."""
        # NOTE(review): this log line uses an "iSCSI[...]" prefix while most
        # siblings use "[...]"; inconsistent but harmless, left unchanged.
        logger.debug("iSCSI[%s] Deleting client lun: %s/%s", self.gateway_name, target_iqn,
                     client_iqn)
        return request({
            'disk': image_id
        })
    @RestClient.api_put('/api/clientauth/{target_iqn}/{client_iqn}')
    def create_client_auth(self, target_iqn, client_iqn, username, password, mutual_username,
                           mutual_password, request=None):
        """Set the CHAP credentials of a client."""
        # NOTE(review): this debug line logs CHAP passwords in clear text;
        # consider redacting them before enabling debug logging.
        logger.debug("[%s] Creating client auth: %s/%s/%s/%s/%s/%s",
                     self.gateway_name, target_iqn, client_iqn, username, password, mutual_username,
                     mutual_password)
        return request({
            'username': username,
            'password': password,
            'mutual_username': mutual_username,
            'mutual_password': mutual_password
        })
@RestClient.api_put('/api/hostgroup/{target_iqn}/{group_name}')
def create_group(self, target_iqn, group_name, members, image_ids, request=None):
    """Create a host group with the given member clients and disks."""
    logger.debug("[%s] Creating group: %s/%s", self.gateway_name, target_iqn, group_name)
    payload = {'members': ','.join(members), 'disks': ','.join(image_ids)}
    return request(payload)
@RestClient.api_put('/api/hostgroup/{target_iqn}/{group_name}')
def update_group(self, target_iqn, group_name, members, image_ids, request=None):
    """Remove the given members/disks from an existing host group
    (the payload's 'action' is fixed to 'remove')."""
    # Use the same "[<gateway>]" log prefix as every sibling method here
    # (previously "iSCSI[%s]").
    logger.debug("[%s] Updating group: %s/%s", self.gateway_name, target_iqn, group_name)
    return request({
        'action': 'remove',
        'members': ','.join(members),
        'disks': ','.join(image_ids)
    })
@RestClient.api_delete('/api/hostgroup/{target_iqn}/{group_name}')
def delete_group(self, target_iqn, group_name, request=None):
    """Delete a host group from the target."""
    logger.debug("[%s] Deleting group: %s/%s", self.gateway_name, target_iqn, group_name)
    return request()
@RestClient.api_put('/api/discoveryauth')
def update_discoveryauth(self, user, password, mutual_user, mutual_password, request=None):
    """Set (mutual) CHAP credentials for discovery authentication."""
    # NOTE(review): credentials are written to the debug log — consider redacting.
    logger.debug("[%s] Updating discoveryauth: %s/%s/%s/%s", self.gateway_name, user,
                 password, mutual_user, mutual_password)
    credentials = {
        'username': user,
        'password': password,
        'mutual_username': mutual_user,
        'mutual_password': mutual_password,
    }
    return request(credentials)
@RestClient.api_put('/api/targetauth/{target_iqn}')
def update_targetacl(self, target_iqn, action, request=None):
    """Apply an auth/ACL `action` on the target."""
    logger.debug("[%s] Updating targetacl: %s/%s", self.gateway_name, target_iqn, action)
    payload = {'action': action}
    return request(payload)
@RestClient.api_put('/api/targetauth/{target_iqn}')
def update_targetauth(self, target_iqn, user, password, mutual_user, mutual_password,
                      request=None):
    """Set (mutual) CHAP credentials on the target."""
    # NOTE(review): credentials are written to the debug log — consider redacting.
    logger.debug("[%s] Updating targetauth: %s/%s/%s/%s/%s", self.gateway_name,
                 target_iqn, user, password, mutual_user, mutual_password)
    credentials = {
        'username': user,
        'password': password,
        'mutual_username': mutual_user,
        'mutual_password': mutual_password,
    }
    return request(credentials)
@RestClient.api_get('/api/targetinfo/{target_iqn}')
def get_targetinfo(self, target_iqn, request=None):
    """Fetch information about a target from the gateway REST API."""
    # pylint: disable=unused-argument
    return request()
@RestClient.api_get('/api/clientinfo/{target_iqn}/{client_iqn}')
def get_clientinfo(self, target_iqn, client_iqn, request=None):
    """Fetch information about a client of a target from the gateway REST API."""
    # pylint: disable=unused-argument
    return request()
| 10,116 | 38.061776 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/iscsi_config.py
|
# -*- coding: utf-8 -*-
import json
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from .. import mgr
class IscsiGatewayAlreadyExists(Exception):
    """Raised when registering a gateway whose name is already configured."""

    def __init__(self, gateway_name):
        message = "iSCSI gateway '{}' already exists".format(gateway_name)
        super(IscsiGatewayAlreadyExists, self).__init__(message)
class IscsiGatewayDoesNotExist(Exception):
    """Raised when a gateway name is looked up but not configured."""

    def __init__(self, hostname):
        message = "iSCSI gateway '{}' does not exist".format(hostname)
        super(IscsiGatewayDoesNotExist, self).__init__(message)
class InvalidServiceUrl(Exception):
    """Raised when a gateway service URL does not match the expected format."""

    def __init__(self, service_url):
        message = ("Invalid service URL '{}'. "
                   "Valid format: '<scheme>://<username>:<password>@<host>[:port]'."
                   .format(service_url))
        super(InvalidServiceUrl, self).__init__(message)
class ManagedByOrchestratorException(Exception):
    """Raised when iSCSI settings cannot be edited here because the
    orchestrator owns the configuration."""

    def __init__(self):
        message = "iSCSI configuration is managed by the orchestrator"
        super(ManagedByOrchestratorException, self).__init__(message)
# KV-store key under which the gateways configuration JSON is persisted.
_ISCSI_STORE_KEY = "_iscsi_config"
class IscsiGatewaysConfig(object):
    """Persistence layer for the iSCSI gateway list, stored in the mgr KV store
    as JSON of the shape {"gateways": {<name>: {"service_url": ...}}}."""

    @classmethod
    def _load_config_from_store(cls):
        """Load (and migrate, if needed) the gateways config from the KV store."""
        json_db = mgr.get_store(_ISCSI_STORE_KEY,
                                '{"gateways": {}}')
        config = json.loads(json_db)
        cls.update_iscsi_config(config)
        return config

    @classmethod
    def update_iscsi_config(cls, config):
        """
        Since `ceph-iscsi` config v10, gateway names were renamed from host short name to FQDN.
        If Ceph Dashboard were configured before v10, we try to update our internal gateways
        database automatically.
        """
        for gateway_name, gateway_config in list(config['gateways'].items()):
            if '.' not in gateway_name:
                # Deferred imports to avoid a module-level import cycle.
                from ..rest_client import RequestException
                from .iscsi_client import IscsiClient  # pylint: disable=cyclic-import
                try:
                    service_url = gateway_config['service_url']
                    new_gateway_name = IscsiClient.instance(
                        service_url=service_url).get_hostname()['data']
                    if gateway_name != new_gateway_name:
                        config['gateways'][new_gateway_name] = gateway_config
                        del config['gateways'][gateway_name]
                        cls._save_config(config)
                except RequestException:
                    # If the gateway is not accessible, it should be removed manually
                    # or we will try to update automatically next time
                    continue

    @classmethod
    def _save_config(cls, config):
        """Serialize and persist the whole config dict."""
        mgr.set_store(_ISCSI_STORE_KEY, json.dumps(config))

    @classmethod
    def validate_service_url(cls, service_url):
        """Raise InvalidServiceUrl unless the URL carries scheme, host and credentials."""
        url = urlparse(service_url)
        if not url.scheme or not url.hostname or not url.username or not url.password:
            raise InvalidServiceUrl(service_url)

    @classmethod
    def add_gateway(cls, name, service_url):
        """Register a new gateway; raises IscsiGatewayAlreadyExists on duplicates."""
        config = cls.get_gateways_config()
        # BUGFIX: the previous check was `name in config`, but the top-level dict's
        # only key is 'gateways', so duplicates were never detected. Look inside
        # the gateways mapping (as remove_gateway already does).
        if name in config['gateways']:
            raise IscsiGatewayAlreadyExists(name)
        IscsiGatewaysConfig.validate_service_url(service_url)
        config['gateways'][name] = {'service_url': service_url}
        cls._save_config(config)

    @classmethod
    def remove_gateway(cls, name):
        """Delete a gateway; raises IscsiGatewayDoesNotExist when absent."""
        config = cls._load_config_from_store()
        if name not in config['gateways']:
            raise IscsiGatewayDoesNotExist(name)

        del config['gateways'][name]
        cls._save_config(config)

    @classmethod
    def get_gateways_config(cls):
        """Return the full config dict (with its 'gateways' mapping)."""
        return cls._load_config_from_store()

    @classmethod
    def get_gateway_config(cls, name):
        """Return one gateway's config; raises IscsiGatewayDoesNotExist when absent."""
        config = IscsiGatewaysConfig.get_gateways_config()
        if name not in config['gateways']:
            raise IscsiGatewayDoesNotExist(name)
        return config['gateways'][name]
| 3,919 | 34 | 98 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/orchestrator.py
|
# -*- coding: utf-8 -*-
import logging
from functools import wraps
from typing import Any, Dict, List, Optional, Tuple
from ceph.deployment.service_spec import ServiceSpec
from orchestrator import DaemonDescription, DeviceLightLoc, HostSpec, \
InventoryFilter, OrchestratorClientMixin, OrchestratorError, OrchResult, \
ServiceDescription, raise_if_exception
from .. import mgr
from ._paginate import ListPaginator
logger = logging.getLogger('orchestrator')
# pylint: disable=abstract-method
class OrchestratorAPI(OrchestratorClientMixin):
    """Orchestrator client bound to the dashboard's mgr module instance."""

    def __init__(self):
        super(OrchestratorAPI, self).__init__()
        self.set_mgr(mgr)  # type: ignore

    def status(self):
        """Probe backend availability; never raises, always returns a dict
        with 'available' and 'message' keys."""
        try:
            status, message, _module_details = super().available()
            logger.info("is orchestrator available: %s, %s", status, message)
            return {'available': status, 'message': message}
        except (RuntimeError, OrchestratorError, ImportError) as e:
            return {'available': False,
                    'message': 'Orchestrator is unavailable: {}'.format(str(e))}
def wait_api_result(method):
    """Decorator: block on the completion returned by an orchestrator call
    and unwrap its result (re-raising any stored exception)."""
    @wraps(method)
    def _wait_and_unwrap(self, *args, **kwargs):
        completion = method(self, *args, **kwargs)
        raise_if_exception(completion)
        return completion.result
    return _wait_and_unwrap
class ResourceManager(object):
    """Base class for the manager helpers below; stores the shared
    OrchestratorAPI instance as `self.api`."""

    def __init__(self, api):
        self.api = api
class HostManger(ResourceManager):
    """Host CRUD, labeling and maintenance operations, proxied to the
    orchestrator backend.

    NOTE(review): the class name keeps its historical misspelling ("Manger")
    because it is referenced by callers outside this view.
    """

    @wait_api_result
    def list(self) -> List[HostSpec]:
        return self.api.get_hosts()

    @wait_api_result
    def enter_maintenance(self, hostname: str, force: bool = False):
        return self.api.enter_host_maintenance(hostname, force)

    @wait_api_result
    def exit_maintenance(self, hostname: str):
        return self.api.exit_host_maintenance(hostname)

    def get(self, hostname: str) -> Optional[HostSpec]:
        """Return the spec of `hostname`, or None when unknown."""
        hosts = [host for host in self.list() if host.hostname == hostname]
        return hosts[0] if hosts else None

    @wait_api_result
    def add(self, hostname: str, addr: str, labels: List[str]):
        return self.api.add_host(HostSpec(hostname, addr=addr, labels=labels))

    @wait_api_result
    def get_facts(self, hostname: Optional[str] = None) -> List[Dict[str, Any]]:
        return self.api.get_facts(hostname)

    @wait_api_result
    def remove(self, hostname: str):
        return self.api.remove_host(hostname)

    @wait_api_result
    def add_label(self, host: str, label: str) -> OrchResult[str]:
        return self.api.add_host_label(host, label)

    @wait_api_result
    def remove_label(self, host: str, label: str) -> OrchResult[str]:
        return self.api.remove_host_label(host, label)

    @wait_api_result
    def drain(self, hostname: str):
        return self.api.drain_host(hostname)
class InventoryManager(ResourceManager):
    """Device inventory queries."""

    @wait_api_result
    def list(self, hosts=None, refresh=False):
        # An explicit host list narrows the query; None means "all hosts".
        host_filter = InventoryFilter(hosts=hosts) if hosts else None
        return self.api.get_inventory(host_filter=host_filter, refresh=refresh)
class ServiceManager(ResourceManager):
    """Queries and lifecycle operations for orchestrator-managed services."""

    def list(self,
             service_type: Optional[str] = None,
             service_name: Optional[str] = None,
             offset: int = 0, limit: int = -1,
             sort: str = '+service_name', search: str = '') -> Tuple[List[Dict[Any, Any]], int]:
        """Return a paginated (service_dicts, total_count) tuple."""
        described = self.api.describe_service(service_type, service_name)
        service_dicts = [svc.to_dict() for svc in described.result]
        page_params = ['service_name', 'status.running',
                       'status.last_refreshed', 'status.size']
        paginator = ListPaginator(offset, limit, sort, search,
                                  input_list=service_dicts,
                                  searchable_params=page_params,
                                  sortable_params=page_params,
                                  default_sort='+service_name')
        return list(paginator.list()), paginator.get_count()

    @wait_api_result
    def get(self, service_name: str) -> ServiceDescription:
        return self.api.describe_service(None, service_name)

    @wait_api_result
    def list_daemons(self,
                     service_name: Optional[str] = None,
                     daemon_type: Optional[str] = None,
                     hostname: Optional[str] = None) -> List[DaemonDescription]:
        return self.api.list_daemons(service_name=service_name,
                                     daemon_type=daemon_type,
                                     host=hostname)

    def reload(self, service_type, service_ids):
        """Issue a 'reload' action for each (service_name, service_id) pair and
        wait for all of them, re-raising the first stored failure."""
        if not isinstance(service_ids, list):
            service_ids = [service_ids]

        completions = []
        for service_name, service_id in service_ids:
            completions.append(
                self.api.service_action('reload', service_type, service_name,
                                        service_id))
        self.api.orchestrator_wait(completions)
        for completion in completions:
            raise_if_exception(completion)

    @wait_api_result
    def apply(self,
              service_spec: Dict,
              no_overwrite: Optional[bool] = False) -> OrchResult[List[str]]:
        """Apply a JSON service spec to the cluster."""
        return self.api.apply([ServiceSpec.from_json(service_spec)], no_overwrite)

    @wait_api_result
    def remove(self, service_name: str) -> List[str]:
        return self.api.remove_service(service_name)
class OsdManager(ResourceManager):
    """OSD creation/removal operations."""

    @wait_api_result
    def create(self, drive_group_specs):
        return self.api.apply_drivegroups(drive_group_specs)

    @wait_api_result
    def remove(self, osd_ids, replace=False, force=False):
        return self.api.remove_osds(osd_ids, replace, force)

    @wait_api_result
    def removing_status(self):
        """Status of in-flight OSD removals."""
        return self.api.remove_osds_status()
class DaemonManager(ResourceManager):
    """Actions on individual daemons."""

    @wait_api_result
    def action(self, daemon_name='', action='', image=None):
        return self.api.daemon_action(daemon_name=daemon_name, action=action, image=image)
class UpgradeManager(ResourceManager):
    """Cluster upgrade queries and control (start/pause/resume/stop)."""

    @wait_api_result
    def list(self, image: Optional[str], tags: bool,
             show_all_versions: Optional[bool]) -> Dict[Any, Any]:
        """List available upgrade targets."""
        return self.api.upgrade_ls(image, tags, show_all_versions)

    @wait_api_result
    def status(self):
        return self.api.upgrade_status()

    @wait_api_result
    def start(self, image: str, version: str, daemon_types: Optional[List[str]] = None,
              host_placement: Optional[str] = None, services: Optional[List[str]] = None,
              limit: Optional[int] = None) -> str:
        return self.api.upgrade_start(image, version, daemon_types, host_placement, services,
                                      limit)

    @wait_api_result
    def pause(self) -> str:
        return self.api.upgrade_pause()

    @wait_api_result
    def resume(self) -> str:
        return self.api.upgrade_resume()

    @wait_api_result
    def stop(self) -> str:
        return self.api.upgrade_stop()
class OrchClient(object):
    """Singleton facade grouping all orchestrator resource managers."""

    _instance = None

    @classmethod
    def instance(cls):
        # type: () -> OrchClient
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        api = OrchestratorAPI()
        self.api = api
        self.hosts = HostManger(api)
        self.inventory = InventoryManager(api)
        self.services = ServiceManager(api)
        self.osds = OsdManager(api)
        self.daemons = DaemonManager(api)
        self.upgrades = UpgradeManager(api)

    def available(self, features: Optional[List[str]] = None) -> bool:
        """True when the backend is up and (optionally) supports `features`."""
        avail = self.status()['available']
        if not avail or features is None:
            return avail
        return not self.get_missing_features(features)

    def status(self) -> Dict[str, Any]:
        """Backend status dict, extended with the supported feature set."""
        info = self.api.status()
        info['features'] = self.api.get_feature_set() if info['available'] else {}
        return info

    def get_missing_features(self, features: List[str]) -> List[str]:
        """Return the subset of `features` not available in the backend."""
        feature_set = self.api.get_feature_set()
        supported = {name for name, meta in feature_set.items() if meta['available']}
        return list(set(features) - supported)

    @wait_api_result
    def blink_device_light(self, hostname, device, ident_fault, on):
        # type: (str, str, str, bool) -> OrchResult[List[str]]
        return self.api.blink_device_light(
            ident_fault, on, [DeviceLightLoc(hostname, device, device)])
class OrchFeature(object):
    """Orchestrator backend method names, used as feature identifiers when
    checking availability (see OrchClient.get_missing_features)."""

    # Hosts
    HOST_LIST = 'get_hosts'
    HOST_ADD = 'add_host'
    HOST_REMOVE = 'remove_host'
    HOST_LABEL_ADD = 'add_host_label'
    HOST_LABEL_REMOVE = 'remove_host_label'
    HOST_MAINTENANCE_ENTER = 'enter_host_maintenance'
    HOST_MAINTENANCE_EXIT = 'exit_host_maintenance'
    HOST_DRAIN = 'drain_host'

    # Services and daemons
    SERVICE_LIST = 'describe_service'
    SERVICE_CREATE = 'apply'
    SERVICE_EDIT = 'apply'
    SERVICE_DELETE = 'remove_service'
    SERVICE_RELOAD = 'service_action'
    DAEMON_LIST = 'list_daemons'

    # OSDs
    OSD_GET_REMOVE_STATUS = 'remove_osds_status'
    OSD_CREATE = 'apply_drivegroups'
    OSD_DELETE = 'remove_osds'

    # Devices
    DEVICE_LIST = 'get_inventory'
    DEVICE_BLINK_LIGHT = 'blink_device_light'

    DAEMON_ACTION = 'daemon_action'

    # Upgrades
    UPGRADE_LIST = 'upgrade_ls'
    UPGRADE_STATUS = 'upgrade_status'
    UPGRADE_START = 'upgrade_start'
    UPGRADE_PAUSE = 'upgrade_pause'
    UPGRADE_RESUME = 'upgrade_resume'
    UPGRADE_STOP = 'upgrade_stop'
| 9,674 | 33.430605 | 97 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/osd.py
|
# -*- coding: utf-8 -*-
from enum import Enum
class OsdDeploymentOptions(str, Enum):
    """Identifiers of the predefined OSD deployment profiles."""

    COST_CAPACITY = 'cost_capacity'
    THROUGHPUT = 'throughput_optimized'
    IOPS = 'iops_optimized'
class HostStorageSummary:
    """Per-host storage capacity/usage summary, serializable via as_dict()."""

    def __init__(self, name: str, title=None, desc=None, available=False,
                 capacity=0, used=0, hdd_used=0, ssd_used=0, nvme_used=0):
        # Populate all attributes in one shot; kwargs keep the original
        # attribute insertion order.
        self.__dict__.update(
            name=name, title=title, desc=desc, available=available,
            capacity=capacity, used=used, hdd_used=hdd_used,
            ssd_used=ssd_used, nvme_used=nvme_used)

    def as_dict(self):
        """Return the instance attributes as a plain dict."""
        return self.__dict__
| 692 | 25.653846 | 74 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/progress.py
|
# -*- coding: utf-8 -*-
'''
Progress Mgr Module Helper
This python module implements helper methods to retrieve the
executing and completed tasks tacked by the progress mgr module
using the same structure of dashboard tasks
'''
import logging
from datetime import datetime
from .. import mgr
from . import rbd # pylint: disable=no-name-in-module
logger = logging.getLogger('progress')
def _progress_event_to_dashboard_task_common(event, task):
if event['refs'] and isinstance(event['refs'], dict):
refs = event['refs']
if refs['origin'] == "rbd_support":
# rbd mgr module event, we can transform this event into an rbd dashboard task
action_map = {
'remove': "delete",
'flatten': "flatten",
'trash remove': "trash/remove"
}
action = action_map.get(refs['action'], refs['action'])
metadata = {}
if 'image_name' in refs:
metadata['image_spec'] = rbd.get_image_spec(refs['pool_name'],
refs['pool_namespace'],
refs['image_name'])
else:
metadata['image_id_spec'] = rbd.get_image_spec(refs['pool_name'],
refs['pool_namespace'],
refs['image_id'])
task.update({
'name': "rbd/{}".format(action),
'metadata': metadata,
'begin_time': "{}Z".format(datetime.fromtimestamp(event["started_at"])
.isoformat()),
})
return
task.update({
# we're prepending the "progress/" prefix to tag tasks that come
# from the progress module
'name': "progress/{}".format(event['message']),
'metadata': dict(event.get('refs', {})),
'begin_time': "{}Z".format(datetime.fromtimestamp(event["started_at"])
.isoformat()),
})
def _progress_event_to_dashboard_task(event, completed=False):
task = {}
_progress_event_to_dashboard_task_common(event, task)
if not completed:
task.update({
'progress': int(100 * event['progress'])
})
else:
task.update({
'end_time': "{}Z".format(datetime.fromtimestamp(event['finished_at'])
.isoformat()),
'duration': event['finished_at'] - event['started_at'],
'progress': 100,
'success': 'failed' not in event,
'ret_value': None,
'exception': {'detail': event['failure_message']} if 'failed' in event else None
})
return task
def get_progress_tasks():
    """Fetch events from the progress mgr module and split them into
    (executing_tasks, finished_tasks) in dashboard-task format."""
    progress_events = mgr.remote('progress', "_json")

    executing = []
    for ev in progress_events['events']:
        logger.debug("event=%s", ev)
        executing.append(_progress_event_to_dashboard_task(ev))

    finished = []
    for ev in progress_events['completed']:
        logger.debug("finished event=%s", ev)
        finished.append(_progress_event_to_dashboard_task(ev, True))

    return executing, finished
| 3,309 | 34.978261 | 92 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/rbd.py
|
# -*- coding: utf-8 -*-
# pylint: disable=unused-argument
import errno
import json
import math
from enum import IntEnum
import cherrypy
import rados
import rbd
from .. import mgr
from ..exceptions import DashboardException
from ..plugins.ttl_cache import ttl_cache, ttl_cache_invalidator
from ._paginate import ListPaginator
from .ceph_service import CephService
try:
from typing import List, Optional
except ImportError:
pass # For typing only
# Mapping between librbd feature bitmask flags and the feature names used by
# the dashboard API (see format_bitmask()/format_features()).
RBD_FEATURES_NAME_MAPPING = {
    rbd.RBD_FEATURE_LAYERING: "layering",
    rbd.RBD_FEATURE_STRIPINGV2: "striping",
    rbd.RBD_FEATURE_EXCLUSIVE_LOCK: "exclusive-lock",
    rbd.RBD_FEATURE_OBJECT_MAP: "object-map",
    rbd.RBD_FEATURE_FAST_DIFF: "fast-diff",
    rbd.RBD_FEATURE_DEEP_FLATTEN: "deep-flatten",
    rbd.RBD_FEATURE_JOURNALING: "journaling",
    rbd.RBD_FEATURE_DATA_POOL: "data-pool",
    rbd.RBD_FEATURE_OPERATIONS: "operations",
}

# Labels identifying the ttl_cache entries used by RbdService below.
RBD_IMAGE_REFS_CACHE_REFERENCE = 'rbd_image_refs'
GET_IOCTX_CACHE = 'get_ioctx'
POOL_NAMESPACES_CACHE = 'pool_namespaces'
class MIRROR_IMAGE_MODE(IntEnum):
    """RBD mirroring modes; values come from the librbd binding."""
    journal = rbd.RBD_MIRROR_IMAGE_MODE_JOURNAL
    snapshot = rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT
def _rbd_support_remote(method_name: str, *args, **kwargs):
    """Call a method of the `rbd_support` mgr module, converting
    module-not-found and runtime failures into DashboardException."""
    try:
        return mgr.remote('rbd_support', method_name, *args, **kwargs)
    except ImportError as ie:
        # The rbd_support mgr module is not loaded.
        raise DashboardException(f'rbd_support module not found {ie}')
    except RuntimeError as ie:
        raise DashboardException(f'rbd_support.{method_name} error: {ie}')
def format_bitmask(features):
    """Translate a feature bitmask into a sorted list of feature names.

    @DISABLEDOCTEST: >>> format_bitmask(45)
    ['deep-flatten', 'exclusive-lock', 'layering', 'object-map']
    """
    enabled = []
    for mask, feature_name in RBD_FEATURES_NAME_MAPPING.items():
        if mask & features == mask:
            enabled.append(feature_name)
    return sorted(enabled)
def format_features(features):
    """Translate a feature-name list (or comma-separated string) into a bitmask.

    Returns None when `features` is neither a list nor a string.

    @DISABLEDOCTEST: >>> format_features(['deep-flatten', 'exclusive-lock',
        'layering', 'object-map'])
    45
    """
    if isinstance(features, str):
        features = features.split(',')

    if not isinstance(features, list):
        return None

    bitmask = 0
    for mask, feature_name in RBD_FEATURES_NAME_MAPPING.items():
        if feature_name in features:
            bitmask |= mask
    return bitmask
def _sort_features(features, enable=True):
"""
Sorts image features according to feature dependencies:
object-map depends on exclusive-lock
journaling depends on exclusive-lock
fast-diff depends on object-map
"""
ORDER = ['exclusive-lock', 'journaling', 'object-map', 'fast-diff'] # noqa: N806
def key_func(feat):
try:
return ORDER.index(feat)
except ValueError:
return id(feat)
features.sort(key=key_func, reverse=not enable)
def get_image_spec(pool_name, namespace, rbd_name):
    """Build a 'pool[/namespace]/image' spec string; a falsy namespace is omitted."""
    parts = [pool_name]
    if namespace:
        parts.append(namespace)
    parts.append(rbd_name)
    return '/'.join(parts)
def parse_image_spec(image_spec):
    """Split a 'pool[/namespace]/image' spec into (pool, namespace_or_None, image)."""
    prefix, image_name = image_spec.rsplit('/', 1)
    if '/' in prefix:
        pool_name, namespace = prefix.rsplit('/', 1)
        return pool_name, namespace, image_name
    return prefix, None, image_name
def rbd_call(pool_name, namespace, func, *args, **kwargs):
    """Open an ioctx for pool_name/namespace and invoke
    func(ioctx, *args, **kwargs) inside the context."""
    with mgr.rados.open_ioctx(pool_name) as ioctx:
        # A None namespace means the default (empty) namespace.
        ioctx.set_namespace(namespace if namespace is not None else '')
        return func(ioctx, *args, **kwargs)
def rbd_image_call(pool_name, namespace, image_name, func, *args, **kwargs):
    """Like rbd_call(), but also opens `image_name` and invokes
    func(ioctx, image, *args, **kwargs)."""
    def _ioctx_func(ioctx, image_name, func, *args, **kwargs):
        with rbd.Image(ioctx, image_name) as img:
            return func(ioctx, img, *args, **kwargs)

    return rbd_call(pool_name, namespace, _ioctx_func, image_name, func, *args, **kwargs)
class RbdConfiguration(object):
    """Read/write RBD configuration options at pool or image level.

    Exactly one of pool_name/pool_ioctx must be given; an image_name (or an
    already-open image_ioctx) switches operations to image level.
    """

    _rbd = rbd.RBD()

    def __init__(self, pool_name: str = '', namespace: str = '', image_name: str = '',
                 pool_ioctx: Optional[rados.Ioctx] = None, image_ioctx: Optional[rbd.Image] = None):
        assert bool(pool_name) != bool(pool_ioctx)  # xor
        self._pool_name = pool_name
        self._namespace = namespace if namespace is not None else ''
        self._image_name = image_name
        self._pool_ioctx = pool_ioctx
        self._image_ioctx = image_ioctx

    @staticmethod
    def _ensure_prefix(option):
        # type: (str) -> str
        # Config options are stored with a 'conf_' prefix.
        return option if option.startswith('conf_') else 'conf_' + option

    def list(self):
        # type: () -> List[dict]
        """Return the config entries of the image (when image_name/image_ioctx
        is set) or of the pool otherwise."""
        def _list(ioctx):
            if self._image_name:  # image config
                try:
                    # No need to open the context of the image again
                    # if we already did open it.
                    if self._image_ioctx:
                        result = self._image_ioctx.config_list()
                    else:
                        with rbd.Image(ioctx, self._image_name) as image:
                            result = image.config_list()
                except rbd.ImageNotFound:
                    result = []
            else:  # pool config
                pg_status = list(CephService.get_pool_pg_status(self._pool_name).keys())
                if len(pg_status) == 1 and 'incomplete' in pg_status[0]:
                    # If config_list would be called with ioctx if it's a bad pool,
                    # the dashboard would stop working, waiting for the response
                    # that would not happen.
                    #
                    # This is only a workaround for https://tracker.ceph.com/issues/43771 which
                    # already got rejected as not worth the effort.
                    #
                    # A more complete workaround for the dashboard will be implemented with
                    # https://tracker.ceph.com/issues/44224
                    #
                    # @TODO: If #44224 is addressed remove this workaround
                    return []
                result = self._rbd.config_list(ioctx)
            return list(result)

        if self._pool_name:
            ioctx = mgr.rados.open_ioctx(self._pool_name)
            ioctx.set_namespace(self._namespace)
        else:
            ioctx = self._pool_ioctx

        return _list(ioctx)

    def get(self, option_name):
        # type: (str) -> str
        """Return the value of one option (image-level when image_name is set)."""
        option_name = self._ensure_prefix(option_name)
        with mgr.rados.open_ioctx(self._pool_name) as pool_ioctx:
            pool_ioctx.set_namespace(self._namespace)
            if self._image_name:
                with rbd.Image(pool_ioctx, self._image_name) as image:
                    return image.metadata_get(option_name)
            return self._rbd.pool_metadata_get(pool_ioctx, option_name)

    def set(self, option_name, option_value):
        # type: (str, str) -> None
        """Set one option. Contexts are entered/exited manually because either
        may have been handed in pre-opened by the caller."""
        option_value = str(option_value)
        option_name = self._ensure_prefix(option_name)

        pool_ioctx = self._pool_ioctx
        if self._pool_name:  # open ioctx
            pool_ioctx = mgr.rados.open_ioctx(self._pool_name)
            pool_ioctx.__enter__()  # type: ignore
            pool_ioctx.set_namespace(self._namespace)  # type: ignore

        image_ioctx = self._image_ioctx
        if self._image_name:
            image_ioctx = rbd.Image(pool_ioctx, self._image_name)
            image_ioctx.__enter__()  # type: ignore

        if image_ioctx:
            image_ioctx.metadata_set(option_name, option_value)  # type: ignore
        else:
            self._rbd.pool_metadata_set(pool_ioctx, option_name, option_value)

        if self._image_name:  # Name provided, so we opened it and now have to close it
            image_ioctx.__exit__(None, None, None)  # type: ignore

        if self._pool_name:
            pool_ioctx.__exit__(None, None, None)  # type: ignore

    def remove(self, option_name):
        """
        Removes an option by name. Will not raise an error, if the option hasn't been found.

        :type option_name: str
        """
        def _remove(ioctx):
            try:
                if self._image_name:
                    with rbd.Image(ioctx, self._image_name) as image:
                        image.metadata_remove(option_name)
                else:
                    self._rbd.pool_metadata_remove(ioctx, option_name)
            except KeyError:
                # Option was not set — nothing to do.
                pass

        option_name = self._ensure_prefix(option_name)

        if self._pool_name:
            with mgr.rados.open_ioctx(self._pool_name) as pool_ioctx:
                pool_ioctx.set_namespace(self._namespace)
                _remove(pool_ioctx)
        else:
            _remove(self._pool_ioctx)

    def set_configuration(self, configuration):
        """Apply a dict of options: None values remove the option, others set it."""
        if configuration:
            for option_name, option_value in configuration.items():
                if option_value is not None:
                    self.set(option_name, option_value)
                else:
                    self.remove(option_name)
class RbdService(object):
    """Dashboard helpers for listing, inspecting and creating RBD images."""

    _rbd_inst = rbd.RBD()

    # set of image features that can be enabled on existing images
    ALLOW_ENABLE_FEATURES = {"exclusive-lock", "object-map", "fast-diff", "journaling"}

    # set of image features that can be disabled on existing images
    ALLOW_DISABLE_FEATURES = {"exclusive-lock", "object-map", "fast-diff", "deep-flatten",
                              "journaling"}
@classmethod
def _rbd_disk_usage(cls, image, snaps, whole_object=True):
    """Compute used bytes of an image by diff-iterating between consecutive
    snapshots.

    Returns (total_used_size, {snap_name: used_size}); `snaps` is a sequence
    of (id, size, name) tuples.
    """
    class DUCallback(object):
        # Accumulates the length of every extent reported as existing.
        def __init__(self):
            self.used_size = 0

        def __call__(self, offset, length, exists):
            if exists:
                self.used_size += length

    snap_map = {}
    prev_snap = None
    total_used_size = 0
    for _, size, name in snaps:
        image.set_snap(name)
        du_callb = DUCallback()
        image.diff_iterate(0, size, prev_snap, du_callb,
                           whole_object=whole_object)
        snap_map[name] = du_callb.used_size
        total_used_size += du_callb.used_size
        prev_snap = name

    return total_used_size, snap_map
@classmethod
def _rbd_image(cls, ioctx, pool_name, namespace, image_name,  # pylint: disable=R0912
               omit_usage=False):
    """Build the full stat dict for one image: identity, mirroring state,
    features, snapshots and (unless omit_usage) disk usage."""
    with rbd.Image(ioctx, image_name) as img:
        stat = img.stat()
        mirror_info = img.mirror_image_get_info()
        mirror_mode = img.mirror_image_get_mode()
        if mirror_mode == rbd.RBD_MIRROR_IMAGE_MODE_JOURNAL and mirror_info['state'] != rbd.RBD_MIRROR_IMAGE_DISABLED:  # noqa E501 #pylint: disable=line-too-long
            stat['mirror_mode'] = 'journal'
        elif mirror_mode == rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT:
            stat['mirror_mode'] = 'snapshot'
            # Attach the mirror-snapshot schedule entry for this image, if any.
            schedule_status = json.loads(_rbd_support_remote(
                'mirror_snapshot_schedule_status')[1])
            for scheduled_image in schedule_status['scheduled_images']:
                if scheduled_image['image'] == get_image_spec(pool_name, namespace, image_name):
                    stat['schedule_info'] = scheduled_image
        else:
            stat['mirror_mode'] = 'Disabled'

        stat['name'] = image_name

        stat['primary'] = None
        if mirror_info['state'] == rbd.RBD_MIRROR_IMAGE_ENABLED:
            stat['primary'] = mirror_info['primary']

        # Old-format (v1) images have no id; use the block name prefix instead.
        if img.old_format():
            stat['unique_id'] = get_image_spec(pool_name, namespace, stat['block_name_prefix'])
            stat['id'] = stat['unique_id']
            stat['image_format'] = 1
        else:
            stat['unique_id'] = get_image_spec(pool_name, namespace, img.id())
            stat['id'] = img.id()
            stat['image_format'] = 2

        stat['pool_name'] = pool_name
        stat['namespace'] = namespace
        features = img.features()
        stat['features'] = features
        stat['features_name'] = format_bitmask(features)

        # the following keys are deprecated
        del stat['parent_pool']
        del stat['parent_name']

        stat['timestamp'] = "{}Z".format(img.create_timestamp()
                                         .isoformat())

        stat['stripe_count'] = img.stripe_count()
        stat['stripe_unit'] = img.stripe_unit()

        data_pool_name = CephService.get_pool_name_from_id(
            img.data_pool_id())
        if data_pool_name == pool_name:
            data_pool_name = None
        stat['data_pool'] = data_pool_name

        stat['parent'] = cls._rbd_image_stat_parent(img)

        # snapshots
        stat['snapshots'] = []
        for snap in img.list_snaps():
            try:
                snap['mirror_mode'] = MIRROR_IMAGE_MODE(img.mirror_image_get_mode()).name
            except ValueError as ex:
                raise DashboardException(f'Unknown RBD Mirror mode: {ex}')

            snap['timestamp'] = "{}Z".format(
                img.get_snap_timestamp(snap['id']).isoformat())

            # Protection/children info is skipped for snapshot-mode mirroring.
            snap['is_protected'] = None
            if mirror_mode != rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT:
                snap['is_protected'] = img.is_protected_snap(snap['name'])
            snap['used_bytes'] = None
            snap['children'] = []

            if mirror_mode != rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT:
                img.set_snap(snap['name'])
                for child_pool_name, child_image_name in img.list_children():
                    snap['children'].append({
                        'pool_name': child_pool_name,
                        'image_name': child_image_name
                    })
            stat['snapshots'].append(snap)

        # disk usage
        img_flags = img.flags()
        if not omit_usage and 'fast-diff' in stat['features_name'] and \
                not rbd.RBD_FLAG_FAST_DIFF_INVALID & img_flags and \
                mirror_mode != rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT:
            snaps = [(s['id'], s['size'], s['name'])
                     for s in stat['snapshots']]
            snaps.sort(key=lambda s: s[0])
            # Sentinel entry (name=None) represents the image head.
            snaps += [(snaps[-1][0] + 1 if snaps else 0, stat['size'], None)]
            total_prov_bytes, snaps_prov_bytes = cls._rbd_disk_usage(
                img, snaps, True)
            stat['total_disk_usage'] = total_prov_bytes
            for snap, prov_bytes in snaps_prov_bytes.items():
                if snap is None:
                    stat['disk_usage'] = prov_bytes
                    continue
                for ss in stat['snapshots']:
                    if ss['name'] == snap:
                        ss['disk_usage'] = prov_bytes
                        break
        else:
            stat['total_disk_usage'] = None
            stat['disk_usage'] = None

        stat['configuration'] = RbdConfiguration(
            pool_ioctx=ioctx, image_name=image_name, image_ioctx=img).list()

        stat['metadata'] = RbdImageMetadataService(img).list()

        return stat
@classmethod
def _rbd_image_stat_parent(cls, img):
    """Return the parent image spec, or None when the image has no parent."""
    try:
        return img.get_parent_image_spec()
    except rbd.ImageNotFound:
        # no parent image
        return None
@classmethod
@ttl_cache(10, label=GET_IOCTX_CACHE)
def get_ioctx(cls, pool_name, namespace=''):
    """Open (and cache for 10s) an ioctx bound to pool_name/namespace."""
    ioctx = mgr.rados.open_ioctx(pool_name)
    ioctx.set_namespace(namespace)
    return ioctx
@classmethod
@ttl_cache(30, label=RBD_IMAGE_REFS_CACHE_REFERENCE)
def _rbd_image_refs(cls, pool_name, namespace=''):
    """List image references of a pool/namespace, cached for 30s."""
    # We add and set the namespace here so that we cache by ioctx and namespace.
    ioctx = cls.get_ioctx(pool_name, namespace)
    # Removed the dead `images = []` initialization that was immediately
    # overwritten by the list2() result.
    return cls._rbd_inst.list2(ioctx)
@classmethod
@ttl_cache(30, label=POOL_NAMESPACES_CACHE)
def _pool_namespaces(cls, pool_name, namespace=None):
    """Return the namespaces to inspect in `pool_name`, cached for 30s.

    When `namespace` is given, only that one is returned; otherwise every
    namespace of the pool plus '' (images without a namespace).
    """
    # Early return replaces the dead `namespaces = []` initialization that
    # was unconditionally overwritten in both branches.
    if namespace:
        return [namespace]
    ioctx = cls.get_ioctx(pool_name, namespace=rados.LIBRADOS_ALL_NSPACES)
    namespaces = cls._rbd_inst.namespace_list(ioctx)
    # images without namespace
    namespaces.append('')
    return namespaces
@classmethod
def _rbd_image_stat(cls, ioctx, pool_name, namespace, image_name):
    """Stat a live (non-trash) image; see _rbd_image()."""
    return cls._rbd_image(ioctx, pool_name, namespace, image_name)
@classmethod
def _rbd_image_stat_removing(cls, ioctx, pool_name, namespace, image_id):
    """Return trash info for an image in status `REMOVING`; raises
    rbd.ImageNotFound for any other trash state."""
    img = cls._rbd_inst.trash_get(ioctx, image_id)
    img_spec = get_image_spec(pool_name, namespace, image_id)

    if img['source'] != 'REMOVING':
        raise rbd.ImageNotFound('No image {} in status `REMOVING` found.'.format(img_spec),
                                errno=errno.ENOENT)

    img['unique_id'] = img_spec
    img['pool_name'] = pool_name
    img['namespace'] = namespace
    img['deletion_time'] = "{}Z".format(img['deletion_time'].isoformat())
    img['deferment_end_time'] = "{}Z".format(img['deferment_end_time'].isoformat())
    return img
@classmethod
def _rbd_pool_image_refs(cls, pool_names: List[str], namespace: Optional[str] = None):
    """Collect image references across pools/namespaces, tagging each ref
    with its pool_name and namespace."""
    refs = []
    for pool in pool_names:
        for ns in cls._pool_namespaces(pool, namespace=namespace):
            for image_ref in cls._rbd_image_refs(pool, ns):
                image_ref['namespace'] = ns
                image_ref['pool_name'] = pool
                refs.append(image_ref)
    return refs
@classmethod
def rbd_pool_list(cls, pool_names: List[str], namespace: Optional[str] = None, offset: int = 0,
                  limit: int = 5, search: str = '', sort: str = ''):
    """Return (stats_page, total_count) for the images of the given pools."""
    image_refs = cls._rbd_pool_image_refs(pool_names, namespace)
    params = ['name', 'pool_name', 'namespace']
    paginator = ListPaginator(offset, limit, sort, search, image_refs,
                              searchable_params=params, sortable_params=params,
                              default_sort='+name')

    result = []
    for image_ref in paginator.list():
        with mgr.rados.open_ioctx(image_ref['pool_name']) as ioctx:
            ioctx.set_namespace(image_ref['namespace'])
            # Check if the RBD has been deleted partially. This happens for example if
            # the deletion process of the RBD has been started and was interrupted.
            try:
                stat = cls._rbd_image_stat(
                    ioctx, image_ref['pool_name'], image_ref['namespace'], image_ref['name'])
            except rbd.ImageNotFound:
                try:
                    stat = cls._rbd_image_stat_removing(
                        ioctx, image_ref['pool_name'], image_ref['namespace'], image_ref['id'])
                except rbd.ImageNotFound:
                    # Image vanished between listing and stat'ing: skip it.
                    continue
            result.append(stat)
    return result, paginator.get_count()
@classmethod
def get_image(cls, image_spec, omit_usage=False):
    """Return the stat dict for `image_spec`; responds 404 when missing."""
    pool_name, namespace, image_name = parse_image_spec(image_spec)
    ioctx = mgr.rados.open_ioctx(pool_name)
    if namespace:
        ioctx.set_namespace(namespace)
    try:
        return cls._rbd_image(ioctx, pool_name, namespace, image_name, omit_usage)
    except rbd.ImageNotFound:
        raise cherrypy.HTTPError(404, 'Image not found')
    @classmethod
    @ttl_cache_invalidator(RBD_IMAGE_REFS_CACHE_REFERENCE)
    def create(cls, name, pool_name, size, namespace=None,
               obj_size=None, features=None, stripe_unit=None, stripe_count=None,
               data_pool=None, configuration=None, metadata=None):
        """
        Create a new RBD image and apply the given configuration overrides
        and metadata to it. Invalidates the image-refs cache.

        :param obj_size: object size in bytes; converted to the power-of-two
            'order' expected by librbd.
        """
        size = int(size)
        def _create(ioctx):
            rbd_inst = cls._rbd_inst
            # Set order
            l_order = None
            if obj_size and obj_size > 0:
                # librbd expects log2 of the object size.
                l_order = int(round(math.log(float(obj_size), 2)))
            # Set features
            feature_bitmask = format_features(features)
            rbd_inst.create(ioctx, name, size, order=l_order, old_format=False,
                            features=feature_bitmask, stripe_unit=stripe_unit,
                            stripe_count=stripe_count, data_pool=data_pool)
            RbdConfiguration(pool_ioctx=ioctx, namespace=namespace,
                             image_name=name).set_configuration(configuration)
            if metadata:
                with rbd.Image(ioctx, name) as image:
                    RbdImageMetadataService(image).set_metadata(metadata)
        rbd_call(pool_name, namespace, _create)
    @classmethod
    @ttl_cache_invalidator(RBD_IMAGE_REFS_CACHE_REFERENCE)
    def set(cls, image_spec, name=None, size=None, features=None,
            configuration=None, metadata=None, enable_mirror=None, primary=None,
            force=False, resync=False, mirror_mode=None, schedule_interval='',
            remove_scheduling=False):
        """
        Edit an existing image: rename, resize, toggle features, apply
        configuration/metadata, and adjust mirroring (enable/disable,
        promote/demote, resync, snapshot scheduling).

        Tri-state flags (``enable_mirror``, ``primary``): None means
        "leave unchanged". Invalidates the image-refs cache.
        """
        # pylint: disable=too-many-branches
        pool_name, namespace, image_name = parse_image_spec(image_spec)
        def _edit(ioctx, image):
            rbd_inst = cls._rbd_inst
            # check rename image
            if name and name != image_name:
                rbd_inst.rename(ioctx, image_name, name)
            # check resize
            if size and size != image.size():
                image.resize(size)
            mirror_image_info = image.mirror_image_get_info()
            if enable_mirror and mirror_image_info['state'] == rbd.RBD_MIRROR_IMAGE_DISABLED:
                RbdMirroringService.enable_image(
                    image_name, pool_name, namespace,
                    MIRROR_IMAGE_MODE[mirror_mode])
            elif (enable_mirror is False
                  and mirror_image_info['state'] == rbd.RBD_MIRROR_IMAGE_ENABLED):
                RbdMirroringService.disable_image(
                    image_name, pool_name, namespace)
            # check enable/disable features
            if features is not None:
                curr_features = format_bitmask(image.features())
                # check disabled features
                # Features are toggled in dependency order (_sort_features);
                # the bitmask is re-read before each toggle to honor features
                # implicitly changed by a previous update.
                _sort_features(curr_features, enable=False)
                for feature in curr_features:
                    if (feature not in features
                            and feature in cls.ALLOW_DISABLE_FEATURES
                            and feature in format_bitmask(image.features())):
                        f_bitmask = format_features([feature])
                        image.update_features(f_bitmask, False)
                # check enabled features
                _sort_features(features)
                for feature in features:
                    if (feature not in curr_features
                            and feature in cls.ALLOW_ENABLE_FEATURES
                            and feature not in format_bitmask(image.features())):
                        f_bitmask = format_features([feature])
                        image.update_features(f_bitmask, True)
            RbdConfiguration(pool_ioctx=ioctx, image_name=image_name).set_configuration(
                configuration)
            if metadata:
                RbdImageMetadataService(image).set_metadata(metadata)
            if primary and not mirror_image_info['primary']:
                RbdMirroringService.promote_image(
                    image_name, pool_name, namespace, force)
            elif primary is False and mirror_image_info['primary']:
                RbdMirroringService.demote_image(
                    image_name, pool_name, namespace)
            if resync:
                RbdMirroringService.resync_image(image_name, pool_name, namespace)
            if schedule_interval:
                RbdMirroringService.snapshot_schedule_add(image_spec, schedule_interval)
            if remove_scheduling:
                RbdMirroringService.snapshot_schedule_remove(image_spec)
        return rbd_image_call(pool_name, namespace, image_name, _edit)
@classmethod
@ttl_cache_invalidator(RBD_IMAGE_REFS_CACHE_REFERENCE)
def delete(cls, image_spec):
pool_name, namespace, image_name = parse_image_spec(image_spec)
image = RbdService.get_image(image_spec)
snapshots = image['snapshots']
for snap in snapshots:
RbdSnapshotService.remove_snapshot(image_spec, snap['name'], snap['is_protected'])
rbd_inst = rbd.RBD()
return rbd_call(pool_name, namespace, rbd_inst.remove, image_name)
    @classmethod
    @ttl_cache_invalidator(RBD_IMAGE_REFS_CACHE_REFERENCE)
    def copy(cls, image_spec, dest_pool_name, dest_namespace, dest_image_name,
             snapshot_name=None, obj_size=None, features=None,
             stripe_unit=None, stripe_count=None, data_pool=None,
             configuration=None, metadata=None):
        """
        Copy an image (optionally from one of its snapshots) into a
        destination pool/namespace, then apply configuration overrides and
        metadata to the copy. Invalidates the image-refs cache.
        """
        pool_name, namespace, image_name = parse_image_spec(image_spec)
        def _src_copy(s_ioctx, s_img):
            def _copy(d_ioctx):
                # Set order
                l_order = None
                if obj_size and obj_size > 0:
                    # librbd expects log2 of the object size.
                    l_order = int(round(math.log(float(obj_size), 2)))
                # Set features
                feature_bitmask = format_features(features)
                if snapshot_name:
                    # Read from the snapshot instead of the image head.
                    s_img.set_snap(snapshot_name)
                s_img.copy(d_ioctx, dest_image_name, feature_bitmask, l_order,
                           stripe_unit, stripe_count, data_pool)
                RbdConfiguration(pool_ioctx=d_ioctx, image_name=dest_image_name).set_configuration(
                    configuration)
                if metadata:
                    with rbd.Image(d_ioctx, dest_image_name) as image:
                        RbdImageMetadataService(image).set_metadata(metadata)
            return rbd_call(dest_pool_name, dest_namespace, _copy)
        return rbd_image_call(pool_name, namespace, image_name, _src_copy)
@classmethod
@ttl_cache_invalidator(RBD_IMAGE_REFS_CACHE_REFERENCE)
def flatten(cls, image_spec):
def _flatten(ioctx, image):
image.flatten()
pool_name, namespace, image_name = parse_image_spec(image_spec)
return rbd_image_call(pool_name, namespace, image_name, _flatten)
@classmethod
def move_image_to_trash(cls, image_spec, delay):
pool_name, namespace, image_name = parse_image_spec(image_spec)
rbd_inst = cls._rbd_inst
return rbd_call(pool_name, namespace, rbd_inst.trash_move, image_name, delay)
class RbdSnapshotService(object):
    """Snapshot-related operations on RBD images."""

    @classmethod
    def remove_snapshot(cls, image_spec, snapshot_name, unprotect=False):
        """Remove a snapshot, optionally unprotecting it first."""
        pool_name, namespace, image_name = parse_image_spec(image_spec)

        def _do_remove(ioctx, img, snap_name, do_unprotect):
            if do_unprotect:
                img.unprotect_snap(snap_name)
            img.remove_snap(snap_name)

        return rbd_image_call(pool_name, namespace, image_name,
                              _do_remove, snapshot_name, unprotect)
class RBDSchedulerInterval:
    """A mirror-snapshot schedule interval such as ``5m``, ``12h`` or ``1d``.

    The textual form is ``<amount><unit>`` where the unit is one of
    minutes (``m``), hours (``h``) or days (``d``).
    """

    def __init__(self, interval: str):
        # Everything but the trailing unit character must be an integer;
        # int() raises ValueError otherwise (including for empty input).
        self.amount = int(interval[:-1])
        unit = interval[-1]
        self.unit = unit
        if unit not in 'mhd':
            raise ValueError(f'Invalid interval unit {unit}')

    def __str__(self):
        """Return the canonical ``<amount><unit>`` representation."""
        return '%d%s' % (self.amount, self.unit)
class RbdMirroringService:
    """Per-image RBD mirroring operations (enable/disable, promote/demote,
    resync and snapshot scheduling)."""

    @classmethod
    def enable_image(cls, image_name: str, pool_name: str, namespace: str, mode: MIRROR_IMAGE_MODE):
        """Enable mirroring on one image in the given mirror mode."""
        def _enable(ioctx, image):
            return image.mirror_image_enable(mode)
        rbd_image_call(pool_name, namespace, image_name, _enable)

    @classmethod
    def disable_image(cls, image_name: str, pool_name: str, namespace: str, force: bool = False):
        """Disable mirroring on one image."""
        def _disable(ioctx, image):
            return image.mirror_image_disable(force)
        rbd_image_call(pool_name, namespace, image_name, _disable)

    @classmethod
    def promote_image(cls, image_name: str, pool_name: str, namespace: str, force: bool = False):
        """Promote the local copy of a mirrored image to primary."""
        def _promote(ioctx, image):
            return image.mirror_image_promote(force)
        rbd_image_call(pool_name, namespace, image_name, _promote)

    @classmethod
    def demote_image(cls, image_name: str, pool_name: str, namespace: str):
        """Demote the local copy of a mirrored image to non-primary."""
        def _demote(ioctx, image):
            return image.mirror_image_demote()
        rbd_image_call(pool_name, namespace, image_name, _demote)

    @classmethod
    def resync_image(cls, image_name: str, pool_name: str, namespace: str):
        """Flag the local image copy for resynchronization."""
        def _resync(ioctx, image):
            return image.mirror_image_resync()
        rbd_image_call(pool_name, namespace, image_name, _resync)

    @classmethod
    def snapshot_schedule_add(cls, image_spec: str, interval: str):
        """Add a mirror snapshot schedule; the interval string is validated
        and normalized through RBDSchedulerInterval."""
        _rbd_support_remote('mirror_snapshot_schedule_add', image_spec,
                            str(RBDSchedulerInterval(interval)))

    @classmethod
    def snapshot_schedule_remove(cls, image_spec: str):
        """Remove the mirror snapshot schedule of an image."""
        _rbd_support_remote('mirror_snapshot_schedule_remove', image_spec)
class RbdImageMetadataService(object):
    """Thin wrapper around the metadata calls of an opened image handle."""

    def __init__(self, image):
        # The opened image handle all calls are delegated to.
        self._image = image

    def list(self):
        """Return all metadata as a dict, hiding ``conf_``-prefixed entries
        (those belong to the image configuration, not user metadata)."""
        return {item[0]: item[1]
                for item in self._image.metadata_list()
                if not item[0].startswith('conf_')}

    def get(self, name):
        """Return the value of a single metadata key."""
        return self._image.metadata_get(name)

    def set(self, name, value):
        """Create or overwrite a single metadata key."""
        self._image.metadata_set(name, value)

    def remove(self, name):
        """Delete a metadata key; a missing key is not an error."""
        try:
            self._image.metadata_remove(name)
        except KeyError:
            pass

    def set_metadata(self, metadata):
        """Apply a dict of changes: a value of ``None`` deletes the key,
        anything else sets it."""
        for name, value in metadata.items():
            if value is None:
                self.remove(name)
            else:
                self.set(name, value)
| 30,452 | 38.19305 | 166 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/rgw_client.py
|
# -*- coding: utf-8 -*-
# pylint: disable=C0302
# pylint: disable=too-many-branches
# pylint: disable=too-many-lines
import ipaddress
import json
import logging
import os
import re
import xml.etree.ElementTree as ET # noqa: N814
from subprocess import SubprocessError
from mgr_util import build_url
from .. import mgr
from ..awsauth import S3Auth
from ..exceptions import DashboardException
from ..rest_client import RequestException, RestClient
from ..settings import Settings
from ..tools import dict_contains_path, dict_get, json_str_to_object, str_to_bool
try:
from typing import Any, Dict, List, Optional, Tuple, Union
except ImportError:
pass # For typing only
logger = logging.getLogger('rgw_client')
class NoRgwDaemonsException(Exception):
    """Raised when the cluster reports no running RGW daemon."""

    def __init__(self):
        message = 'No RGW service is running.'
        super().__init__(message)
class NoCredentialsException(Exception):
    """Raised when no RGW API credentials are available for the dashboard."""

    def __init__(self):
        super().__init__(
            'No RGW credentials found, '
            'please consult the documentation on how to enable RGW for '
            'the dashboard.')
class RgwAdminException(Exception):
    # Generic RGW admin error; where it is raised is not visible in this
    # chunk -- NOTE(review): confirm usage sites before documenting further.
    pass
class RgwDaemon:
    """Simple representation of a daemon."""
    host: str  # hostname the daemon runs on (metadata['hostname'])
    name: str  # daemon id (metadata['id'])
    port: int  # frontend port parsed from 'frontend_config#0'
    ssl: bool  # True if the frontend serves SSL
    realm_name: str
    zonegroup_name: str
    zone_name: str
def _get_daemons() -> Dict[str, RgwDaemon]:
    """
    Retrieve RGW daemon info from MGR.

    :return: dict of RgwDaemon keyed by daemon name (metadata id).
    :raises NoRgwDaemonsException: if the service map has no RGW daemons,
        or none of them exposes a parsable frontend configuration.
    """
    service_map = mgr.get('service_map')
    if not dict_contains_path(service_map, ['services', 'rgw', 'daemons']):
        raise NoRgwDaemonsException
    daemons = {}
    daemon_map = service_map['services']['rgw']['daemons']
    for key in daemon_map.keys():
        # Daemons without 'frontend_config#0' metadata are skipped, as host
        # and port cannot be determined for them.
        if dict_contains_path(daemon_map[key], ['metadata', 'frontend_config#0']):
            daemon = _determine_rgw_addr(daemon_map[key])
            daemon.name = daemon_map[key]['metadata']['id']
            daemon.realm_name = daemon_map[key]['metadata']['realm_name']
            daemon.zonegroup_name = daemon_map[key]['metadata']['zonegroup_name']
            daemon.zone_name = daemon_map[key]['metadata']['zone_name']
            daemons[daemon.name] = daemon
            logger.info('Found RGW daemon with configuration: host=%s, port=%d, ssl=%s',
                        daemon.host, daemon.port, str(daemon.ssl))
    if not daemons:
        raise NoRgwDaemonsException
    return daemons
def _determine_rgw_addr(daemon_info: Dict[str, Any]) -> RgwDaemon:
    """
    Parse RGW daemon info to determine the configured host (IP address) and port.

    :param daemon_info: one entry of the service map's RGW daemon dict;
        must contain 'metadata' with 'hostname' and 'frontend_config#0'.
    :return: a partially filled RgwDaemon (host, port, ssl only).
    """
    daemon = RgwDaemon()
    daemon.host = daemon_info['metadata']['hostname']
    daemon.port, daemon.ssl = _parse_frontend_config(daemon_info['metadata']['frontend_config#0'])
    return daemon
def _parse_addr(value) -> str:
"""
Get the IP address the RGW is running on.
>>> _parse_addr('192.168.178.3:49774/1534999298')
'192.168.178.3'
>>> _parse_addr('[2001:db8:85a3::8a2e:370:7334]:49774/1534999298')
'2001:db8:85a3::8a2e:370:7334'
>>> _parse_addr('xyz')
Traceback (most recent call last):
...
LookupError: Failed to determine RGW address
>>> _parse_addr('192.168.178.a:8080/123456789')
Traceback (most recent call last):
...
LookupError: Invalid RGW address '192.168.178.a' found
>>> _parse_addr('[2001:0db8:1234]:443/123456789')
Traceback (most recent call last):
...
LookupError: Invalid RGW address '2001:0db8:1234' found
>>> _parse_addr('2001:0db8::1234:49774/1534999298')
Traceback (most recent call last):
...
LookupError: Failed to determine RGW address
:param value: The string to process. The syntax is '<HOST>:<PORT>/<NONCE>'.
:type: str
:raises LookupError if parsing fails to determine the IP address.
:return: The IP address.
:rtype: str
"""
match = re.search(r'^(\[)?(?(1)([^\]]+)\]|([^:]+)):\d+/\d+?', value)
if match:
# IPv4:
# Group 0: 192.168.178.3:49774/1534999298
# Group 3: 192.168.178.3
# IPv6:
# Group 0: [2001:db8:85a3::8a2e:370:7334]:49774/1534999298
# Group 1: [
# Group 2: 2001:db8:85a3::8a2e:370:7334
addr = match.group(3) if match.group(3) else match.group(2)
try:
ipaddress.ip_address(addr)
return addr
except ValueError:
raise LookupError('Invalid RGW address \'{}\' found'.format(addr))
raise LookupError('Failed to determine RGW address')
def _parse_frontend_config(config) -> Tuple[int, bool]:
    """
    Get the port the RGW is running on. Due the complexity of the
    syntax not all variations are supported.

    If there are multiple (ssl_)ports/(ssl_)endpoints options, then
    the first found option will be returned.

    Get more details about the configuration syntax here:
    http://docs.ceph.com/en/latest/radosgw/frontends/
    https://civetweb.github.io/civetweb/UserManual.html

    :param config: The configuration string to parse.
    :type config: str
    :raises LookupError if parsing fails to determine the port.
    :return: A tuple containing the port number and the information
             whether SSL is used.
    :rtype: (int, boolean)
    """
    # Only 'beast' and 'civetweb' frontends are recognized.
    match = re.search(r'^(beast|civetweb)\s+.+$', config)
    if match:
        if match.group(1) == 'beast':
            match = re.search(r'(port|ssl_port|endpoint|ssl_endpoint)=(.+)',
                              config)
            if match:
                option_name = match.group(1)
                if option_name in ['port', 'ssl_port']:
                    # e.g. "port=8080" -> first run of digits is the port.
                    match = re.search(r'(\d+)', match.group(2))
                    if match:
                        port = int(match.group(1))
                        ssl = option_name == 'ssl_port'
                        return port, ssl
                if option_name in ['endpoint', 'ssl_endpoint']:
                    # e.g. "endpoint=192.168.0.1:8080" or "[::]:8080";
                    # without an explicit port, 443 (ssl) resp. 80 applies.
                    match = re.search(r'([\d.]+|\[.+\])(:(\d+))?',
                                      match.group(2))  # type: ignore
                    if match:
                        port = int(match.group(3)) if \
                            match.group(2) is not None else 443 if \
                            option_name == 'ssl_endpoint' else \
                            80
                        ssl = option_name == 'ssl_endpoint'
                        return port, ssl
        if match.group(1) == 'civetweb':  # type: ignore
            # e.g. "port=10.0.0.1:8000s" -- a trailing 's' marks SSL.
            match = re.search(r'port=(.*:)?(\d+)(s)?', config)
            if match:
                port = int(match.group(2))
                ssl = match.group(3) == 's'
                return port, ssl
    raise LookupError('Failed to determine RGW port from "{}"'.format(config))
def _parse_secrets(user: str, data: dict) -> Tuple[str, str]:
for key in data.get('keys', []):
if key.get('user') == user and data.get('system') in ['true', True]:
access_key = key.get('access_key')
secret_key = key.get('secret_key')
return access_key, secret_key
return '', ''
def _get_user_keys(user: str, realm: Optional[str] = None) -> Tuple[str, str]:
    """
    Fetch the S3 key pair of the given (system) user via radosgw-admin,
    creating the user as a system user if it does not exist yet.

    :param realm: optional realm to scope the radosgw-admin commands to.
    :return: (access_key, secret_key); both empty strings on failure.
    """
    access_key = ''
    secret_key = ''
    rgw_user_info_cmd = ['user', 'info', '--uid', user]
    cmd_realm_option = ['--rgw-realm', realm] if realm else []
    if realm:
        rgw_user_info_cmd += cmd_realm_option
    try:
        _, out, err = mgr.send_rgwadmin_command(rgw_user_info_cmd)
        if out:
            access_key, secret_key = _parse_secrets(user, out)
        if not access_key:
            # User missing (or not a system user with keys): create it.
            rgw_create_user_cmd = [
                'user', 'create',
                '--uid', user,
                '--display-name', 'Ceph Dashboard',
                '--system',
            ] + cmd_realm_option
            _, out, err = mgr.send_rgwadmin_command(rgw_create_user_cmd)
            if out:
                access_key, secret_key = _parse_secrets(user, out)
        if not access_key:
            logger.error('Unable to create rgw user "%s": %s', user, err)
    except SubprocessError as error:
        # Best effort: log and fall through to the empty-key return.
        logger.exception(error)
    return access_key, secret_key
def configure_rgw_credentials():
    """
    Provision and store the RGW API credentials of the 'dashboard' system
    user in the dashboard settings.

    In a multi-realm setup the settings hold JSON objects mapping realm
    name to key; in a single/no-realm setup they hold the plain key strings.

    :raises NoCredentialsException: if no usable key pair could be obtained.
    """
    logger.info('Configuring dashboard RGW credentials')
    user = 'dashboard'
    realms = []
    access_key = ''
    secret_key = ''
    try:
        _, out, err = mgr.send_rgwadmin_command(['realm', 'list'])
        if out:
            realms = out.get('realms', [])
        if err:
            logger.error('Unable to list RGW realms: %s', err)
        if realms:
            realm_access_keys = {}
            realm_secret_keys = {}
            for realm in realms:
                realm_access_key, realm_secret_key = _get_user_keys(user, realm)
                if realm_access_key:
                    realm_access_keys[realm] = realm_access_key
                    realm_secret_keys[realm] = realm_secret_key
            if realm_access_keys:
                # Store the per-realm key maps as JSON strings.
                access_key = json.dumps(realm_access_keys)
                secret_key = json.dumps(realm_secret_keys)
        else:
            access_key, secret_key = _get_user_keys(user)
        assert access_key and secret_key
        Settings.RGW_API_ACCESS_KEY = access_key
        Settings.RGW_API_SECRET_KEY = secret_key
    except (AssertionError, SubprocessError) as error:
        logger.exception(error)
        raise NoCredentialsException
# pylint: disable=R0904
class RgwClient(RestClient):
    """REST client for the RGW Admin Ops / S3 APIs.

    Instances are cached per daemon ('config' instances using the dashboard
    credentials) and per (daemon, user); see ``instance()``.
    """
    _host = None
    _port = None
    _ssl = None
    # Cached per-user clients: {daemon_name: {userid: RgwClient}}.
    _user_instances = {}  # type: Dict[str, Dict[str, RgwClient]]
    # Cached clients built from the dashboard settings, keyed by daemon name.
    _config_instances = {}  # type: Dict[str, RgwClient]
    # Snapshot of the RGW settings used to invalidate the caches on change.
    _rgw_settings_snapshot = None
    _daemons: Dict[str, RgwDaemon] = {}

    daemon: RgwDaemon
    got_keys_from_config: bool
    userid: str
@staticmethod
def _handle_response_status_code(status_code: int) -> int:
# Do not return auth error codes (so they are not handled as ceph API user auth errors).
return 404 if status_code in [401, 403] else status_code
    @staticmethod
    def _get_daemon_connection_info(daemon_name: str) -> dict:
        """
        Resolve the access/secret key pair for a daemon from the settings.
        Supports both the per-realm JSON-map format and legacy plain strings.

        :raises DashboardException: 404 if no credentials exist for the
            daemon's realm.
        """
        try:
            realm_name = RgwClient._daemons[daemon_name].realm_name
            access_key = Settings.RGW_API_ACCESS_KEY[realm_name]
            secret_key = Settings.RGW_API_SECRET_KEY[realm_name]
        except TypeError:
            # Legacy string values.
            access_key = Settings.RGW_API_ACCESS_KEY
            secret_key = Settings.RGW_API_SECRET_KEY
        except KeyError as error:
            raise DashboardException(msg='Credentials not found for RGW Daemon: {}'.format(error),
                                     http_status_code=404,
                                     component='rgw')

        return {'access_key': access_key, 'secret_key': secret_key}

    def _get_daemon_zone_info(self):  # type: () -> dict
        # Zone configuration as returned by the admin 'config?type=zone' call.
        return json_str_to_object(self.proxy('GET', 'config?type=zone', None, None))

    def _get_realms_info(self):  # type: () -> dict
        # Realm listing as returned by the admin 'realm?list' call.
        return json_str_to_object(self.proxy('GET', 'realm?list', None, None))

    def _get_realm_info(self, realm_id: str) -> Dict[str, Any]:
        # Details of a single realm, addressed by its id.
        return json_str_to_object(self.proxy('GET', f'realm?id={realm_id}', None, None))

    @staticmethod
    def _rgw_settings():
        # Tuple of all settings whose change must invalidate cached clients.
        return (Settings.RGW_API_ACCESS_KEY,
                Settings.RGW_API_SECRET_KEY,
                Settings.RGW_API_ADMIN_RESOURCE,
                Settings.RGW_API_SSL_VERIFY)
@staticmethod
def instance(userid: Optional[str] = None,
daemon_name: Optional[str] = None) -> 'RgwClient':
# pylint: disable=too-many-branches
RgwClient._daemons = _get_daemons()
# The API access key and secret key are mandatory for a minimal configuration.
if not (Settings.RGW_API_ACCESS_KEY and Settings.RGW_API_SECRET_KEY):
configure_rgw_credentials()
if not daemon_name:
# Select 1st daemon:
daemon_name = next(iter(RgwClient._daemons.keys()))
# Discard all cached instances if any rgw setting has changed
if RgwClient._rgw_settings_snapshot != RgwClient._rgw_settings():
RgwClient._rgw_settings_snapshot = RgwClient._rgw_settings()
RgwClient.drop_instance()
if daemon_name not in RgwClient._config_instances:
connection_info = RgwClient._get_daemon_connection_info(daemon_name)
RgwClient._config_instances[daemon_name] = RgwClient(connection_info['access_key'],
connection_info['secret_key'],
daemon_name)
if not userid or userid == RgwClient._config_instances[daemon_name].userid:
return RgwClient._config_instances[daemon_name]
if daemon_name not in RgwClient._user_instances \
or userid not in RgwClient._user_instances[daemon_name]:
# Get the access and secret keys for the specified user.
keys = RgwClient._config_instances[daemon_name].get_user_keys(userid)
if not keys:
raise RequestException(
"User '{}' does not have any keys configured.".format(
userid))
instance = RgwClient(keys['access_key'],
keys['secret_key'],
daemon_name,
userid)
RgwClient._user_instances.update({daemon_name: {userid: instance}})
return RgwClient._user_instances[daemon_name][userid]
    @staticmethod
    def admin_instance(daemon_name: Optional[str] = None) -> 'RgwClient':
        """Return the client that uses the dashboard's own credentials."""
        return RgwClient.instance(daemon_name=daemon_name)

    @staticmethod
    def drop_instance(instance: Optional['RgwClient'] = None):
        """
        Drop a cached instance or all.
        """
        if instance:
            if instance.got_keys_from_config:
                del RgwClient._config_instances[instance.daemon.name]
            else:
                del RgwClient._user_instances[instance.daemon.name][instance.userid]
        else:
            RgwClient._config_instances.clear()
            RgwClient._user_instances.clear()

    def _reset_login(self):
        """
        Re-authenticate after a failed request: for per-user clients, fetch
        fresh keys via the admin instance; for the config-credential client,
        fail immediately since its credentials come from the settings.
        """
        if self.got_keys_from_config:
            raise RequestException('Authentication failed for the "{}" user: wrong credentials'
                                   .format(self.userid), status_code=401)
        logger.info("Fetching new keys for user: %s", self.userid)
        keys = RgwClient.admin_instance(daemon_name=self.daemon.name).get_user_keys(self.userid)
        self.auth = S3Auth(keys['access_key'], keys['secret_key'],
                           service_url=self.service_url)
    def __init__(self,
                 access_key: str,
                 secret_key: str,
                 daemon_name: str,
                 user_id: Optional[str] = None) -> None:
        """
        Build a client for one RGW daemon, authenticating with AWS-style
        S3 signatures.

        :param user_id: when omitted, the credentials are assumed to come
            from the dashboard settings and the user id is resolved via the
            Admin Ops API.
        :raises DashboardException: if the daemon is unknown or the initial
            connection fails.
        """
        try:
            daemon = RgwClient._daemons[daemon_name]
        except KeyError as error:
            raise DashboardException(msg='RGW Daemon not found: {}'.format(error),
                                     http_status_code=404,
                                     component='rgw')
        ssl_verify = Settings.RGW_API_SSL_VERIFY
        self.admin_path = Settings.RGW_API_ADMIN_RESOURCE
        self.service_url = build_url(host=daemon.host, port=daemon.port)

        self.auth = S3Auth(access_key, secret_key, service_url=self.service_url)
        super(RgwClient, self).__init__(daemon.host,
                                        daemon.port,
                                        'RGW',
                                        daemon.ssl,
                                        self.auth,
                                        ssl_verify=ssl_verify)
        self.got_keys_from_config = not user_id
        try:
            # Resolve the effective user id through the Admin Ops API when
            # the credentials come from the configuration.
            self.userid = self._get_user_id(self.admin_path) if self.got_keys_from_config \
                else user_id
        except RequestException as error:
            logger.exception(error)
            msg = 'Error connecting to Object Gateway'
            if error.status_code == 404:
                msg = '{}: {}'.format(msg, str(error))
            raise DashboardException(msg=msg,
                                     http_status_code=error.status_code,
                                     component='rgw')
        self.daemon = daemon
        logger.info("Created new connection: daemon=%s, host=%s, port=%s, ssl=%d, sslverify=%d",
                    daemon.name, daemon.host, daemon.port, daemon.ssl, ssl_verify)
    @RestClient.api_get('/', resp_structure='[0] > (ID & DisplayName)')
    def is_service_online(self, request=None) -> bool:
        """
        Consider the service as online if the response contains the
        specified keys. Nothing more is checked here.
        """
        _ = request({'format': 'json'})
        return True

    @RestClient.api_get('/{admin_path}/metadata/user?myself',
                        resp_structure='data > user_id')
    def _get_user_id(self, admin_path, request=None):
        # pylint: disable=unused-argument
        """
        Get the user ID of the user that is used to communicate with the
        RGW Admin Ops API.
        :rtype: str
        :return: The user ID of the user that is used to sign the
                 RGW Admin Ops API calls.
        """
        response = request()
        return response['data']['user_id']

    @RestClient.api_get('/{admin_path}/metadata/user', resp_structure='[+]')
    def _user_exists(self, admin_path, user_id, request=None):
        # pylint: disable=unused-argument
        # Checks membership of user_id (or of our own user) in the
        # metadata user listing.
        response = request()
        if user_id:
            return user_id in response
        return self.userid in response

    def user_exists(self, user_id=None):
        """Return True if the given user (default: our own) exists."""
        return self._user_exists(self.admin_path, user_id)

    @RestClient.api_get('/{admin_path}/metadata/user?key={userid}',
                        resp_structure='data > system')
    def _is_system_user(self, admin_path, userid, request=None) -> bool:
        # pylint: disable=unused-argument
        response = request()
        return response['data']['system']

    def is_system_user(self) -> bool:
        """Return True if our own user is flagged as a system user."""
        return self._is_system_user(self.admin_path, self.userid)

    @RestClient.api_get(
        '/{admin_path}/user',
        resp_structure='tenant & user_id & email & keys[*] > '
                       ' (user & access_key & secret_key)')
    def _admin_get_user_keys(self, admin_path, userid, request=None):
        # pylint: disable=unused-argument
        # 'tenant:user' -> query by plain user, then pick the key entry
        # matching the full (possibly tenanted) user id.
        colon_idx = userid.find(':')
        user = userid if colon_idx == -1 else userid[:colon_idx]
        response = request({'uid': user})
        for key in response['keys']:
            if key['user'] == userid:
                return {
                    'access_key': key['access_key'],
                    'secret_key': key['secret_key']
                }

        return None

    def get_user_keys(self, userid):
        """Return {'access_key', 'secret_key'} of a user, or None."""
        return self._admin_get_user_keys(self.admin_path, userid)
    @RestClient.api('/{admin_path}/{path}')
    def _proxy_request(
            self,  # pylint: disable=too-many-arguments
            admin_path,
            path,
            method,
            params,
            data,
            request=None):
        # pylint: disable=unused-argument
        # Pass an arbitrary request through to the Admin Ops API,
        # returning the raw (undecoded) response body.
        return request(method=method, params=params, data=data,
                       raw_content=True)

    def proxy(self, method, path, params, data):
        """Forward a request to the RGW Admin Ops API under the admin path."""
        logger.debug("proxying method=%s path=%s params=%s data=%s",
                     method, path, params, data)
        return self._proxy_request(self.admin_path, path, method,
                                   params, data)

    @RestClient.api_get('/', resp_structure='[1][*] > Name')
    def get_buckets(self, request=None):
        """
        Get a list of names from all existing buckets of this user.
        :return: Returns a list of bucket names.
        """
        response = request({'format': 'json'})
        return [bucket['Name'] for bucket in response[1]]

    @RestClient.api_get('/{bucket_name}')
    def bucket_exists(self, bucket_name, userid, request=None):
        """
        Check if the specified bucket exists for this user.
        :param bucket_name: The name of the bucket.
        :return: Returns True if the bucket exists, otherwise False.
        :raises RequestException: 403 if the bucket exists but belongs to
            a different user.
        """
        # pylint: disable=unused-argument
        try:
            request()
            my_buckets = self.get_buckets()
            if bucket_name not in my_buckets:
                raise RequestException(
                    'Bucket "{}" belongs to other user'.format(bucket_name),
                    403)
            return True
        except RequestException as e:
            if e.status_code == 404:
                return False

            raise e
    @RestClient.api_put('/{bucket_name}')
    def create_bucket(self, bucket_name, zonegroup=None,
                      placement_target=None, lock_enabled=False,
                      request=None):
        """
        Create a bucket, optionally pinning it to a zonegroup/placement
        target (via a CreateBucketConfiguration payload) and optionally
        enabling object locking at creation time.
        """
        logger.info("Creating bucket: %s, zonegroup: %s, placement_target: %s",
                    bucket_name, zonegroup, placement_target)
        data = None
        if zonegroup and placement_target:
            create_bucket_configuration = ET.Element('CreateBucketConfiguration')
            location_constraint = ET.SubElement(create_bucket_configuration, 'LocationConstraint')
            location_constraint.text = '{}:{}'.format(zonegroup, placement_target)
            data = ET.tostring(create_bucket_configuration, encoding='unicode')

        headers = None  # type: Optional[dict]
        if lock_enabled:
            # Object locking can only be enabled at bucket creation.
            headers = {'x-amz-bucket-object-lock-enabled': 'true'}

        return request(data=data, headers=headers)

    def get_placement_targets(self):  # type: () -> dict
        """Return the daemon's zonegroup with its placement targets and
        their STANDARD data pools."""
        zone = self._get_daemon_zone_info()
        placement_targets = []  # type: List[Dict]
        for placement_pool in zone['placement_pools']:
            placement_targets.append(
                {
                    'name': placement_pool['key'],
                    'data_pool': placement_pool['val']['storage_classes']['STANDARD']['data_pool']
                }
            )

        return {'zonegroup': self.daemon.zonegroup_name,
                'placement_targets': placement_targets}

    def get_realms(self):  # type: () -> List
        """Return the list of realm names, or an empty list."""
        realms_info = self._get_realms_info()
        if 'realms' in realms_info and realms_info['realms']:
            return realms_info['realms']

        return []

    def get_default_realm(self):
        """Return the name of the default realm, or None."""
        realms_info = self._get_realms_info()
        if 'default_info' in realms_info and realms_info['default_info']:
            realm_info = self._get_realm_info(realms_info['default_info'])
            if 'name' in realm_info and realm_info['name']:
                return realm_info['name']
        return None
    @RestClient.api_get('/{bucket_name}?versioning')
    def get_bucket_versioning(self, bucket_name, request=None):
        """
        Get bucket versioning.
        :param str bucket_name: the name of the bucket.
        :return: versioning info; missing fields default to
            Status='Suspended' and MfaDelete='Disabled'.
        :rtype: Dict
        """
        # pylint: disable=unused-argument
        result = request()
        if 'Status' not in result:
            result['Status'] = 'Suspended'
        if 'MfaDelete' not in result:
            result['MfaDelete'] = 'Disabled'
        return result

    @RestClient.api_put('/{bucket_name}?versioning')
    def set_bucket_versioning(self, bucket_name, versioning_state, mfa_delete,
                              mfa_token_serial, mfa_token_pin, request=None):
        """
        Set bucket versioning.
        :param str bucket_name: the name of the bucket.
        :param str versioning_state:
            https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html
        :param str mfa_delete: MFA Delete state.
        :param str mfa_token_serial:
            https://docs.ceph.com/docs/master/radosgw/mfa/
        :param str mfa_token_pin: value of a TOTP token at a certain time (auth code)
        :return: None
        :raises DashboardException: on request failure; a denied request
            with MFA parameters is reported as bad MFA credentials.
        """
        # pylint: disable=unused-argument
        versioning_configuration = ET.Element('VersioningConfiguration')
        status_element = ET.SubElement(versioning_configuration, 'Status')
        status_element.text = versioning_state

        headers = {}
        if mfa_delete and mfa_token_serial and mfa_token_pin:
            # The x-amz-mfa header carries 'serial pin' for MfaDelete changes.
            headers['x-amz-mfa'] = '{} {}'.format(mfa_token_serial, mfa_token_pin)
            mfa_delete_element = ET.SubElement(versioning_configuration, 'MfaDelete')
            mfa_delete_element.text = mfa_delete

        data = ET.tostring(versioning_configuration, encoding='unicode')

        try:
            request(data=data, headers=headers)
        except RequestException as error:
            msg = str(error)
            if mfa_delete and mfa_token_serial and mfa_token_pin \
                    and 'AccessDenied' in error.content.decode():
                msg = 'Bad MFA credentials: {}'.format(msg)
            raise DashboardException(msg=msg,
                                     http_status_code=error.status_code,
                                     component='rgw')
    @RestClient.api_get('/{bucket_name}?encryption')
    def get_bucket_encryption(self, bucket_name, request=None):
        """
        Get the server-side encryption configuration of a bucket. A bucket
        without a configuration is reported as {'Status': 'Disabled'}.
        """
        # pylint: disable=unused-argument
        try:
            result = request()  # type: ignore
            result['Status'] = 'Enabled'
            return result
        except RequestException as e:
            if e.content:
                content = json_str_to_object(e.content)
                if content.get(
                        'Code') == 'ServerSideEncryptionConfigurationNotFoundError':
                    return {
                        'Status': 'Disabled',
                    }
            raise e

    @RestClient.api_delete('/{bucket_name}?encryption')
    def delete_bucket_encryption(self, bucket_name, request=None):
        """Remove the server-side encryption configuration of a bucket."""
        # pylint: disable=unused-argument
        result = request()  # type: ignore
        return result

    @RestClient.api_put('/{bucket_name}?encryption')
    def set_bucket_encryption(self, bucket_name, key_id,
                              sse_algorithm, request: Optional[object] = None):
        """
        Set default server-side encryption on a bucket.

        :param sse_algorithm: e.g. 'AES256' or 'aws:kms'; only the latter
            uses key_id (as KMSMasterKeyID).
        :raises DashboardException: on request failure.
        """
        # pylint: disable=unused-argument
        encryption_configuration = ET.Element('ServerSideEncryptionConfiguration')
        rule_element = ET.SubElement(encryption_configuration, 'Rule')
        default_encryption_element = ET.SubElement(rule_element,
                                                   'ApplyServerSideEncryptionByDefault')
        sse_algo_element = ET.SubElement(default_encryption_element,
                                         'SSEAlgorithm')
        sse_algo_element.text = sse_algorithm
        if sse_algorithm == 'aws:kms':
            kms_master_key_element = ET.SubElement(default_encryption_element,
                                                   'KMSMasterKeyID')
            kms_master_key_element.text = key_id
        data = ET.tostring(encryption_configuration, encoding='unicode')
        try:
            _ = request(data=data)  # type: ignore
        except RequestException as e:
            raise DashboardException(msg=str(e), component='rgw')
    @RestClient.api_get('/{bucket_name}?object-lock')
    def get_bucket_locking(self, bucket_name, request=None):
        # type: (str, Optional[object]) -> dict
        """
        Gets the locking configuration for a bucket. The locking
        configuration will be applied by default to every new object
        placed in the specified bucket.
        :param bucket_name: The name of the bucket.
        :type bucket_name: str
        :return: The locking configuration.
        :rtype: Dict
        """
        # pylint: disable=unused-argument

        # Try to get the Object Lock configuration. If there is none,
        # then return default values.
        try:
            result = request()  # type: ignore
            return {
                'lock_enabled': dict_get(result, 'ObjectLockEnabled') == 'Enabled',
                'lock_mode': dict_get(result, 'Rule.DefaultRetention.Mode'),
                'lock_retention_period_days': dict_get(result, 'Rule.DefaultRetention.Days', 0),
                'lock_retention_period_years': dict_get(result, 'Rule.DefaultRetention.Years', 0)
            }
        except RequestException as e:
            if e.content:
                content = json_str_to_object(e.content)
                if content.get(
                        'Code') == 'ObjectLockConfigurationNotFoundError':
                    # No lock configuration on the bucket: report defaults.
                    return {
                        'lock_enabled': False,
                        'lock_mode': 'compliance',
                        'lock_retention_period_days': None,
                        'lock_retention_period_years': None
                    }
            raise e
    @RestClient.api_put('/{bucket_name}?object-lock')
    def set_bucket_locking(self,
                           bucket_name: str,
                           mode: str,
                           retention_period_days: Optional[Union[int, str]] = None,
                           retention_period_years: Optional[Union[int, str]] = None,
                           request: Optional[object] = None) -> None:
        """
        Places the locking configuration on the specified bucket. The
        locking configuration will be applied by default to every new
        object placed in the specified bucket.
        :param bucket_name: The name of the bucket.
        :type bucket_name: str
        :param mode: The lock mode, e.g. `COMPLIANCE` or `GOVERNANCE`.
        :type mode: str
        :param retention_period_days:
        :type retention_period_days: int
        :param retention_period_years:
        :type retention_period_years: int
        :raises DashboardException: if validation or the request fails.
        :rtype: None
        """
        # pylint: disable=unused-argument
        retention_period_days, retention_period_years = self.perform_validations(
            retention_period_days, retention_period_years, mode)

        # Generate the XML data like this:
        # <ObjectLockConfiguration>
        #    <ObjectLockEnabled>string</ObjectLockEnabled>
        #    <Rule>
        #       <DefaultRetention>
        #          <Days>integer</Days>
        #          <Mode>string</Mode>
        #          <Years>integer</Years>
        #       </DefaultRetention>
        #    </Rule>
        # </ObjectLockConfiguration>
        locking_configuration = ET.Element('ObjectLockConfiguration')
        enabled_element = ET.SubElement(locking_configuration,
                                        'ObjectLockEnabled')
        enabled_element.text = 'Enabled'  # Locking can't be disabled.
        rule_element = ET.SubElement(locking_configuration, 'Rule')
        default_retention_element = ET.SubElement(rule_element,
                                                  'DefaultRetention')
        mode_element = ET.SubElement(default_retention_element, 'Mode')
        mode_element.text = mode.upper()
        if retention_period_days:
            days_element = ET.SubElement(default_retention_element, 'Days')
            days_element.text = str(retention_period_days)
        if retention_period_years:
            years_element = ET.SubElement(default_retention_element, 'Years')
            years_element.text = str(retention_period_years)

        data = ET.tostring(locking_configuration, encoding='unicode')

        try:
            _ = request(data=data)  # type: ignore
        except RequestException as e:
            raise DashboardException(msg=str(e), component='rgw')
def list_roles(self) -> List[Dict[str, Any]]:
rgw_list_roles_command = ['role', 'list']
code, roles, err = mgr.send_rgwadmin_command(rgw_list_roles_command)
if code < 0:
logger.warning('Error listing roles with code %d: %s', code, err)
return []
return roles
def create_role(self, role_name: str, role_path: str, role_assume_policy_doc: str) -> None:
try:
json.loads(role_assume_policy_doc)
except: # noqa: E722
raise DashboardException('Assume role policy document is not a valid json')
# valid values:
# pylint: disable=C0301
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-path # noqa: E501
if len(role_name) > 64:
raise DashboardException(
f'Role name "{role_name}" is invalid. Should be 64 characters or less')
role_name_regex = '[0-9a-zA-Z_+=,.@-]+'
if not re.fullmatch(role_name_regex, role_name):
raise DashboardException(
f'Role name "{role_name}" is invalid. Valid characters are "{role_name_regex}"')
if not os.path.isabs(role_path):
raise DashboardException(
f'Role path "{role_path}" is invalid. It should be an absolute path')
if role_path[-1] != '/':
raise DashboardException(
f'Role path "{role_path}" is invalid. It should start and end with a slash')
path_regex = '(\u002F)|(\u002F[\u0021-\u007E]+\u002F)'
if not re.fullmatch(path_regex, role_path):
raise DashboardException(
(f'Role path "{role_path}" is invalid.'
f'Role path should follow the pattern "{path_regex}"'))
rgw_create_role_command = ['role', 'create', '--role-name', role_name, '--path', role_path]
if role_assume_policy_doc:
rgw_create_role_command += ['--assume-role-policy-doc', f"{role_assume_policy_doc}"]
code, _roles, _err = mgr.send_rgwadmin_command(rgw_create_role_command,
stdout_as_json=False)
if code != 0:
# pylint: disable=C0301
link = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-path' # noqa: E501
msg = (f'Error creating role with code {code}: '
'Looks like the document has a wrong format.'
f' For more information about the format look at {link}')
raise DashboardException(msg=msg, component='rgw')
def perform_validations(self, retention_period_days, retention_period_years, mode):
try:
retention_period_days = int(retention_period_days) if retention_period_days else 0
retention_period_years = int(retention_period_years) if retention_period_years else 0
if retention_period_days < 0 or retention_period_years < 0:
raise ValueError
except (TypeError, ValueError):
msg = "Retention period must be a positive integer."
raise DashboardException(msg=msg, component='rgw')
if retention_period_days and retention_period_years:
# https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTBucketPUTObjectLockConfiguration.html
msg = "Retention period requires either Days or Years. "\
"You can't specify both at the same time."
raise DashboardException(msg=msg, component='rgw')
if not retention_period_days and not retention_period_years:
msg = "Retention period requires either Days or Years. "\
"You must specify at least one."
raise DashboardException(msg=msg, component='rgw')
if not isinstance(mode, str) or mode.upper() not in ['COMPLIANCE', 'GOVERNANCE']:
msg = "Retention mode must be either COMPLIANCE or GOVERNANCE."
raise DashboardException(msg=msg, component='rgw')
return retention_period_days, retention_period_years
class RgwMultisite:
    """Thin wrapper around the ``radosgw-admin`` multisite commands.

    Every method shells out through ``mgr.send_rgwadmin_command`` and turns
    failures (non-zero exit status or a ``SubprocessError``) into a
    ``DashboardException`` so the REST layer can report them uniformly.
    """

    def _run(self, cmd: List[str], error_msg: str, stdout_as_json: bool = True):
        """Run a radosgw-admin command and return its (optionally JSON-decoded) stdout.

        :param cmd: radosgw-admin argument list (without the binary name).
        :param error_msg: message for the exception raised on failure.
        :param stdout_as_json: whether stdout should be JSON-decoded.
        :raises DashboardException: on non-zero exit or subprocess failure.
        """
        try:
            exit_code, out, err = mgr.send_rgwadmin_command(cmd, stdout_as_json)
        except SubprocessError as error:
            raise DashboardException(error, http_status_code=500, component='rgw')
        if exit_code > 0:
            raise DashboardException(e=err, msg=error_msg,
                                     http_status_code=500, component='rgw')
        return out

    def migrate_to_multisite(self, realm_name: str, zonegroup_name: str, zone_name: str,
                             zonegroup_endpoints: str, zone_endpoints: str, access_key: str,
                             secret_key: str):
        """Convert a single-site deployment into a multisite one.

        Creates a default realm, renames the implicit 'default'
        zonegroup/zone and marks both as master+default, then optionally
        sets the system user credentials on the zone.
        """
        self._run(['realm', 'create', '--rgw-realm', realm_name, '--default'],
                  'Unable to create realm', stdout_as_json=False)
        self._run(['zonegroup', 'rename', '--rgw-zonegroup', 'default',
                   '--zonegroup-new-name', zonegroup_name],
                  'Unable to rename zonegroup to {}'.format(zonegroup_name),
                  stdout_as_json=False)
        self._run(['zone', 'rename', '--rgw-zone', 'default',
                   '--zone-new-name', zone_name, '--rgw-zonegroup', zonegroup_name],
                  'Unable to rename zone to {}'.format(zone_name),
                  stdout_as_json=False)

        zonegroup_modify_cmd = ['zonegroup', 'modify',
                                '--rgw-realm', realm_name,
                                '--rgw-zonegroup', zonegroup_name]
        if zonegroup_endpoints:
            zonegroup_modify_cmd += ['--endpoints', zonegroup_endpoints]
        zonegroup_modify_cmd += ['--master', '--default']
        self._run(zonegroup_modify_cmd,
                  'Unable to modify zonegroup {}'.format(zonegroup_name))

        zone_modify_cmd = ['zone', 'modify', '--rgw-realm', realm_name,
                           '--rgw-zonegroup', zonegroup_name,
                           '--rgw-zone', zone_name]
        if zone_endpoints:
            zone_modify_cmd += ['--endpoints', zone_endpoints]
        zone_modify_cmd += ['--master', '--default']
        self._run(zone_modify_cmd, 'Unable to modify zone')

        if access_key and secret_key:
            self._run(['zone', 'modify', '--rgw-zone', zone_name,
                       '--access-key', access_key, '--secret', secret_key],
                      'Unable to modify zone')

    def create_realm(self, realm_name: str, default: bool):
        """Create a realm; ``default`` is a string flag ('false' disables)."""
        cmd = ['realm', 'create', '--rgw-realm', realm_name]
        if default != 'false':
            cmd.append('--default')
        self._run(cmd, 'Unable to create realm')

    def list_realms(self):
        """Return the raw ``realm list`` output (dict with 'realms' etc.)."""
        return self._run(['realm', 'list'], 'Unable to fetch realm list')

    def get_realm(self, realm_name: str):
        """Return the full info dict for a single realm."""
        return self._run(['realm', 'get', '--rgw-realm', realm_name],
                         'Unable to get realm info')

    def get_all_realms_info(self):
        """Return ``{'realms': [...], 'default_realm': name-or-''}``."""
        all_realms_info = {}
        realm_list = self.list_realms()
        if 'realms' in realm_list:
            all_realms_info['realms'] = [self.get_realm(name)
                                         for name in realm_list['realms']]
        all_realms_info['default_realm'] = realm_list.get('default_info') or ''
        return all_realms_info

    def edit_realm(self, realm_name: str, new_realm_name: str, default: str = ''):
        """Rename a realm and/or make it the default realm."""
        if new_realm_name != realm_name:
            self._run(['realm', 'rename', '--rgw-realm', realm_name,
                       '--realm-new-name', new_realm_name],
                      'Unable to edit realm', stdout_as_json=False)
        if default and str_to_bool(default):
            self._run(['realm', 'default', '--rgw-realm', new_realm_name],
                      'Unable to set {} as default realm'.format(new_realm_name),
                      stdout_as_json=False)

    def delete_realm(self, realm_name: str):
        """Delete the given realm."""
        self._run(['realm', 'rm', '--rgw-realm', realm_name], 'Unable to delete realm')

    def create_zonegroup(self, realm_name: str, zonegroup_name: str,
                         default: bool, master: bool, endpoints: str):
        """Create a zonegroup; 'null'/'false' string sentinels skip options."""
        cmd = ['zonegroup', 'create', '--rgw-zonegroup', zonegroup_name]
        if realm_name != 'null':
            cmd += ['--rgw-realm', realm_name]
        if default != 'false':
            cmd.append('--default')
        if master != 'false':
            cmd.append('--master')
        if endpoints:
            cmd += ['--endpoints', endpoints]
        # The original raised 'Unable to get realm info' here (copy/paste bug).
        return self._run(cmd, 'Unable to create zonegroup')

    def list_zonegroups(self):
        """Return the raw ``zonegroup list`` output."""
        return self._run(['zonegroup', 'list'], 'Unable to fetch zonegroup list')

    def get_zonegroup(self, zonegroup_name: str):
        """Return the full info dict for a single zonegroup."""
        cmd = ['zonegroup', 'get', '--rgw-zonegroup', zonegroup_name]
        if zonegroup_name == 'default':
            # The implicit 'default' zonegroup only resolves within the
            # 'default' realm.
            cmd += ['--rgw-realm', 'default']
        return self._run(cmd, 'Unable to get zonegroup info')

    def get_all_zonegroups_info(self):
        """Return ``{'zonegroups': [...], 'default_zonegroup': name-or-''}``."""
        all_zonegroups_info = {}
        zonegroup_list = self.list_zonegroups()
        if 'zonegroups' in zonegroup_list:
            all_zonegroups_info['zonegroups'] = [self.get_zonegroup(name)
                                                 for name in zonegroup_list['zonegroups']]
        all_zonegroups_info['default_zonegroup'] = zonegroup_list.get('default_info') or ''
        return all_zonegroups_info

    def delete_zonegroup(self, zonegroup_name: str, delete_pools: str, pools: List[str]):
        """Delete a zonegroup, optionally deleting its zones' pools too."""
        zonegroup_info = {}
        if delete_pools == 'true':
            # Capture the zone list before the zonegroup disappears.
            zonegroup_info = self.get_zonegroup(zonegroup_name)
        self._run(['zonegroup', 'delete', '--rgw-zonegroup', zonegroup_name],
                  'Unable to delete zonegroup')
        self.update_period()
        if delete_pools == 'true':
            for zone in zonegroup_info['zones']:
                self.delete_zone(zone['name'], 'true', pools)

    def modify_zonegroup(self, realm_name: str, zonegroup_name: str, default: str, master: str,
                         endpoints: str):
        """Modify zonegroup endpoints / master / default flags."""
        cmd = ['zonegroup', 'modify',
               '--rgw-realm', realm_name,
               '--rgw-zonegroup', zonegroup_name]
        if endpoints:
            cmd += ['--endpoints', endpoints]
        if master and str_to_bool(master):
            cmd.append('--master')
        if default and str_to_bool(default):
            cmd.append('--default')
        self._run(cmd, 'Unable to modify zonegroup {}'.format(zonegroup_name))
        self.update_period()

    def add_or_remove_zone(self, zonegroup_name: str, zone_name: str, action: str):
        """Add ('add') or remove ('remove') a zone from a zonegroup."""
        if action == 'add':
            self._run(['zonegroup', 'add', '--rgw-zonegroup', zonegroup_name,
                       '--rgw-zone', zone_name],
                      'Unable to add zone {} to zonegroup {}'.format(zone_name, zonegroup_name))
            self.update_period()
        if action == 'remove':
            self._run(['zonegroup', 'remove', '--rgw-zonegroup', zonegroup_name,
                       '--rgw-zone', zone_name],
                      'Unable to remove zone {} from zonegroup {}'.format(zone_name,
                                                                          zonegroup_name))
            self.update_period()

    def get_placement_targets_by_zonegroup(self, zonegroup_name: str):
        """Return the placement targets configured for a zonegroup."""
        return self._run(['zonegroup', 'placement', 'list',
                          '--rgw-zonegroup', zonegroup_name],
                         'Unable to get placement targets')

    def _placement_cmd(self, action: str, zonegroup_name: str, target: Dict) -> List[str]:
        """Build the base ``zonegroup placement <action>`` command for a target."""
        cmd = ['zonegroup', 'placement', action,
               '--rgw-zonegroup', zonegroup_name,
               '--placement-id', target['placement_id']]
        if target['tags']:
            cmd += ['--tags', target['tags']]
        return cmd

    def add_placement_targets(self, zonegroup_name: str, placement_targets: List[Dict]):
        """Add placement targets (and their storage classes) to a zonegroup.

        Note: the original accumulated options of all targets into a single
        ever-growing command list; each target now gets its own command.
        """
        for target in placement_targets:
            cmd = self._placement_cmd('add', zonegroup_name, target)
            err_msg = 'Unable to add placement target {} to zonegroup {}'.format(
                target['placement_id'], zonegroup_name)
            self._run(cmd, err_msg)
            self.update_period()
            storage_classes = target['storage_class'].split(",") if target['storage_class'] else []
            for storage_class in storage_classes:
                self._run(cmd + ['--storage-class', storage_class], err_msg)
                self.update_period()

    def modify_placement_targets(self, zonegroup_name: str, placement_targets: List[Dict]):
        """Modify existing placement targets of a zonegroup.

        With storage classes the modify command is issued once per class;
        otherwise a single bare modify is issued.
        """
        for target in placement_targets:
            cmd = self._placement_cmd('modify', zonegroup_name, target)
            err_msg = 'Unable to add placement target {} to zonegroup {}'.format(
                target['placement_id'], zonegroup_name)
            storage_classes = target['storage_class'].split(",") if target['storage_class'] else []
            if storage_classes:
                for storage_class in storage_classes:
                    self._run(cmd + ['--storage-class', storage_class], err_msg)
                    self.update_period()
            else:
                self._run(cmd, err_msg)
                self.update_period()

    def edit_zonegroup(self, realm_name: str, zonegroup_name: str, new_zonegroup_name: str,
                       default: str = '', master: str = '', endpoints: str = '',
                       add_zones: Optional[List[str]] = None,
                       remove_zones: Optional[List[str]] = None,
                       placement_targets: Optional[List[Dict[str, str]]] = None):
        """Rename/reconfigure a zonegroup, its zones and placement targets.

        Mutable-default arguments of the original were replaced with
        ``None`` sentinels (backward compatible for all callers).
        """
        add_zones = add_zones or []
        remove_zones = remove_zones or []
        placement_targets = placement_targets or []
        if new_zonegroup_name != zonegroup_name:
            self._run(['zonegroup', 'rename', '--rgw-zonegroup', zonegroup_name,
                       '--zonegroup-new-name', new_zonegroup_name],
                      'Unable to rename zonegroup to {}'.format(new_zonegroup_name),
                      stdout_as_json=False)
            self.update_period()
        self.modify_zonegroup(realm_name, new_zonegroup_name, default, master, endpoints)
        for zone_name in add_zones:
            self.add_or_remove_zone(new_zonegroup_name, zone_name, 'add')
        for zone_name in remove_zones:
            self.add_or_remove_zone(new_zonegroup_name, zone_name, 'remove')
        existing_ids = [pt['key']
                        for pt in self.get_placement_targets_by_zonegroup(new_zonegroup_name)]
        for target in placement_targets:
            # Dispatch each target individually (the original passed the
            # whole list to add/modify for every single matching target).
            if target['placement_id'] in existing_ids:
                self.modify_placement_targets(new_zonegroup_name, [target])
            else:
                self.add_placement_targets(new_zonegroup_name, [target])

    def update_period(self):
        """Commit the current period so configuration changes take effect."""
        self._run(['period', 'update', '--commit'], 'Unable to update period')

    def create_zone(self, zone_name, zonegroup_name, default, master, endpoints, access_key,
                    secret_key):
        """Create a zone; 'null'/'false' string sentinels skip options."""
        cmd = ['zone', 'create', '--rgw-zone', zone_name]
        if zonegroup_name != 'null':
            cmd += ['--rgw-zonegroup', zonegroup_name]
        if default != 'false':
            cmd.append('--default')
        if master != 'false':
            cmd.append('--master')
        if endpoints != 'null':
            cmd += ['--endpoints', endpoints]
        if access_key is not None:
            cmd += ['--access-key', access_key]
        if secret_key is not None:
            cmd += ['--secret', secret_key]
        out = self._run(cmd, 'Unable to create zone')
        self.update_period()
        return out

    def parse_secrets(self, user, data):
        """Return ``(access_key, secret_key)`` for ``user`` from a zone/user
        info dict, or ``('', '')`` when the user has no matching key."""
        for key in data.get('keys', []):
            if key.get('user') == user:
                return key.get('access_key'), key.get('secret_key')
        return '', ''

    def modify_zone(self, zone_name: str, zonegroup_name: str, default: str, master: str,
                    endpoints: str, access_key: str, secret_key: str):
        """Modify zone flags, endpoints and system-user credentials."""
        cmd = ['zone', 'modify', '--rgw-zonegroup', zonegroup_name,
               '--rgw-zone', zone_name]
        if endpoints:
            cmd += ['--endpoints', endpoints]
        if default and str_to_bool(default):
            cmd.append('--default')
        if master and str_to_bool(master):
            cmd.append('--master')
        if access_key is not None:
            cmd += ['--access-key', access_key]
        if secret_key is not None:
            cmd += ['--secret', secret_key]
        self._run(cmd, 'Unable to modify zone')
        self.update_period()

    def add_placement_targets_zone(self, zone_name: str, placement_target: str, data_pool: str,
                                   index_pool: str, data_extra_pool: str):
        """Attach a placement target (with its pools) to a zone."""
        self._run(['zone', 'placement', 'add', '--rgw-zone', zone_name,
                   '--placement-id', placement_target, '--data-pool', data_pool,
                   '--index-pool', index_pool,
                   '--data-extra-pool', data_extra_pool],
                  'Unable to add placement target {} to zone {}'.format(placement_target,
                                                                        zone_name))
        self.update_period()

    def add_storage_class_zone(self, zone_name: str, placement_target: str, storage_class: str,
                               data_pool: str, compression: str):
        """Attach a storage class to a zone's placement target."""
        self._run(['zone', 'placement', 'add', '--rgw-zone', zone_name,
                   '--placement-id', placement_target,
                   '--storage-class', storage_class,
                   '--data-pool', data_pool,
                   '--compression', compression],
                  'Unable to add storage class {} to zone {}'.format(storage_class, zone_name))
        self.update_period()

    def edit_zone(self, zone_name: str, new_zone_name: str, zonegroup_name: str, default: str = '',
                  master: str = '', endpoints: str = '', access_key: str = '', secret_key: str = '',
                  placement_target: str = '', data_pool: str = '', index_pool: str = '',
                  data_extra_pool: str = '', storage_class: str = '', data_pool_class: str = '',
                  compression: str = ''):
        """Rename and/or reconfigure a zone, incl. placement and storage class."""
        if new_zone_name != zone_name:
            self._run(['zone', 'rename', '--rgw-zone', zone_name,
                       '--zone-new-name', new_zone_name],
                      'Unable to rename zone to {}'.format(new_zone_name),
                      stdout_as_json=False)
            self.update_period()
        self.modify_zone(new_zone_name, zonegroup_name, default, master, endpoints, access_key,
                         secret_key)
        self.add_placement_targets_zone(new_zone_name, placement_target,
                                        data_pool, index_pool, data_extra_pool)
        self.add_storage_class_zone(new_zone_name, placement_target, storage_class,
                                    data_pool_class, compression)

    def list_zones(self):
        """Return the raw ``zone list`` output."""
        return self._run(['zone', 'list'], 'Unable to fetch zone list')

    def get_zone(self, zone_name: str):
        """Return the full info dict for a single zone."""
        return self._run(['zone', 'get', '--rgw-zone', zone_name],
                         'Unable to get zone info')

    def get_all_zones_info(self):
        """Return ``{'zones': [...], 'default_zone': name-or-''}``."""
        all_zones_info = {}
        zone_list = self.list_zones()
        if 'zones' in zone_list:
            all_zones_info['zones'] = [self.get_zone(name) for name in zone_list['zones']]
        all_zones_info['default_zone'] = zone_list.get('default_info') or ''
        return all_zones_info

    def delete_zone(self, zone_name: str, delete_pools: str, pools: List[str],
                    zonegroup_name: str = '',):
        """Delete a zone, optionally detaching it from a zonegroup first and
        deleting the given pools afterwards."""
        if zonegroup_name:
            self._run(['zonegroup', 'remove', '--rgw-zonegroup', zonegroup_name,
                       '--rgw-zone', zone_name],
                      'Unable to remove zone from zonegroup')
            self.update_period()
        self._run(['zone', 'delete', '--rgw-zone', zone_name], 'Unable to delete zone')
        self.update_period()
        if delete_pools == 'true':
            self.delete_pools(pools)

    def delete_pools(self, pools):
        """Delete the given RADOS pools if they exist."""
        for pool in pools:
            if mgr.rados.pool_exists(pool):
                mgr.rados.delete_pool(pool)

    def create_system_user(self, userName: str, zoneName: str):
        """Create a system user inside the given zone and return its info."""
        return self._run(['user', 'create', '--uid', userName,
                          '--display-name', userName, '--rgw-zone', zoneName, '--system'],
                         'Unable to create system user')

    def get_user_list(self, zoneName: str):
        """Return the full info dict of every user in the given zone."""
        user_list = self._run(['user', 'list', '--rgw-zone', zoneName],
                              'Unable to get user list')
        return [self._run(['user', 'info', '--uid', user_name, '--rgw-zone', zoneName],
                          'Unable to get user info')
                for user_name in user_list]

    def get_multisite_status(self):
        """Return True when any realm, zonegroup or zone is configured."""
        realms = self.list_realms()
        zonegroups = self.list_zonegroups()
        zones = self.list_zones()
        if not realms['realms'] and not zonegroups['zonegroups'] and not zones['zones']:
            return False
        return True
| 72,846 | 46.581319 | 211 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/settings.py
|
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import cherrypy
class SettingsService:
    @contextmanager
    # pylint: disable=no-self-argument
    def attribute_handler(name):
        """
        Translate a setting name, or a mapping keyed by setting names, into
        the native (upper-cased, underscored) representation and expose it
        inside a ``with`` block. An ``AttributeError`` raised inside the
        block is converted into ``cherrypy.NotFound``.

        :type name: str|dict[str, str]
        :rtype: str|dict[str, str]
        """
        if isinstance(name, dict):
            native = {_to_native(key): value for key, value in name.items()}
        else:
            native = _to_native(name)
        try:
            yield native
        except AttributeError:  # pragma: no cover - handling is too obvious
            raise cherrypy.NotFound(native)  # pragma: no cover - handling is too obvious
def _to_native(setting):
return setting.upper().replace('-', '_')
| 782 | 24.258065 | 89 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/sso.py
|
# -*- coding: utf-8 -*-
# pylint: disable=too-many-return-statements,too-many-branches
import errno
import json
import logging
import os
import threading
import warnings
from urllib import parse
from .. import mgr
from ..tools import prepare_url_prefix
logger = logging.getLogger('sso')
try:
from onelogin.saml2.errors import OneLogin_Saml2_Error as Saml2Error
from onelogin.saml2.idp_metadata_parser import OneLogin_Saml2_IdPMetadataParser as Saml2Parser
from onelogin.saml2.settings import OneLogin_Saml2_Settings as Saml2Settings
python_saml_imported = True
except ImportError:
python_saml_imported = False
class Saml2(object):
    """Thin wrapper around the python3-saml ('onelogin') settings dict."""

    def __init__(self, onelogin_settings):
        self.onelogin_settings = onelogin_settings

    def get_username_attribute(self):
        """Return the IdP attribute name that carries the dashboard username."""
        requested = self.onelogin_settings['sp']['attributeConsumingService'][
            'requestedAttributes']
        return requested[0]['name']

    def to_dict(self):
        """Serialize for persistence in the SSO DB."""
        return {'onelogin_settings': self.onelogin_settings}

    @classmethod
    def from_dict(cls, s_dict):
        """Inverse of :meth:`to_dict`."""
        return cls(s_dict['onelogin_settings'])
class SsoDB(object):
    """Persisted Single Sign-On configuration, stored via the mgr KV store."""
    VERSION = 1
    SSODB_CONFIG_KEY = "ssodb_v"
    def __init__(self, version, protocol, saml2):
        self.version = version
        # Active SSO protocol: '' means disabled, 'saml2' means enabled.
        self.protocol = protocol
        self.saml2 = saml2
        self.lock = threading.RLock()
    def save(self):
        """Serialize this DB as JSON into the mgr key/value store."""
        with self.lock:
            db = {
                'protocol': self.protocol,
                'saml2': self.saml2.to_dict(),
                'version': self.version
            }
            mgr.set_store(self.ssodb_config_key(), json.dumps(db))
    @classmethod
    def ssodb_config_key(cls, version=None):
        # The store key is versioned so future schema changes can coexist.
        if version is None:
            version = cls.VERSION
        return "{}{}".format(cls.SSODB_CONFIG_KEY, version)
    def check_and_update_db(self):
        """Migrate from older on-disk versions (none exist yet for v1)."""
        logger.debug("Checking for previous DB versions")
        if self.VERSION != 1:
            raise NotImplementedError()
    @classmethod
    def load(cls):
        """Load the stored DB, or create a fresh disabled one if absent."""
        logger.info("Loading SSO DB version=%s", cls.VERSION)
        json_db = mgr.get_store(cls.ssodb_config_key(), None)
        if json_db is None:
            logger.debug("No DB v%s found, creating new...", cls.VERSION)
            db = cls(cls.VERSION, '', Saml2({}))
            # check if we can update from a previous version database
            db.check_and_update_db()
            return db
        dict_db = json.loads(json_db)  # type: dict
        return cls(dict_db['version'], dict_db.get('protocol'),
                   Saml2.from_dict(dict_db.get('saml2')))
def load_sso_db():
    """Load (or initialize) the SSO DB and cache it on the mgr module."""
    mgr.SSO_DB = SsoDB.load()  # type: ignore
# CLI command descriptors registered by the dashboard module
# ('ceph dashboard sso ...'); dispatched at runtime by handle_sso_command().
SSO_COMMANDS = [
    {
        'cmd': 'dashboard sso enable saml2',
        'desc': 'Enable SAML2 Single Sign-On',
        'perm': 'w'
    },
    {
        'cmd': 'dashboard sso disable',
        'desc': 'Disable Single Sign-On',
        'perm': 'w'
    },
    {
        'cmd': 'dashboard sso status',
        'desc': 'Get Single Sign-On status',
        'perm': 'r'
    },
    {
        'cmd': 'dashboard sso show saml2',
        'desc': 'Show SAML2 configuration',
        'perm': 'r'
    },
    {
        'cmd': 'dashboard sso setup saml2 '
               'name=ceph_dashboard_base_url,type=CephString '
               'name=idp_metadata,type=CephString '
               'name=idp_username_attribute,type=CephString,req=false '
               'name=idp_entity_id,type=CephString,req=false '
               'name=sp_x_509_cert,type=CephFilepath,req=false '
               'name=sp_private_key,type=CephFilepath,req=false',
        'desc': 'Setup SAML2 Single Sign-On',
        'perm': 'w'
    }
]
def _get_optional_attr(cmd, attr, default):
if attr in cmd:
if cmd[attr] != '':
return cmd[attr]
return default
def handle_sso_command(cmd):
    """Dispatch a 'dashboard sso ...' CLI command.

    :param cmd: parsed command dict; ``cmd['prefix']`` selects the action.
    :return: ``(retcode, stdout, stderr)`` as expected by the mgr framework.
    """
    known_prefixes = ('dashboard sso enable saml2',
                      'dashboard sso disable',
                      'dashboard sso status',
                      'dashboard sso show saml2',
                      'dashboard sso setup saml2')
    prefix = cmd['prefix']
    if prefix not in known_prefixes:
        return -errno.ENOSYS, '', ''
    # All SSO support currently relies on the python3-saml package.
    if not python_saml_imported:
        return -errno.EPERM, '', 'Required library not found: `python3-saml`'
    if prefix == 'dashboard sso disable':
        mgr.SSO_DB.protocol = ''
        mgr.SSO_DB.save()
        return 0, 'SSO is "disabled".', ''
    if prefix == 'dashboard sso enable saml2':
        # Enabling only flips the protocol flag; a valid configuration
        # must already have been stored via 'sso setup saml2'.
        if _is_sso_configured():
            mgr.SSO_DB.protocol = 'saml2'
            mgr.SSO_DB.save()
            return 0, 'SSO is "enabled" with "SAML2" protocol.', ''
        return -errno.EPERM, '', ('Single Sign-On is not configured: '
                                  'use `ceph dashboard sso setup saml2`')
    if prefix == 'dashboard sso status':
        if mgr.SSO_DB.protocol == 'saml2':
            return 0, 'SSO is "enabled" with "SAML2" protocol.', ''
        return 0, 'SSO is "disabled".', ''
    if prefix == 'dashboard sso show saml2':
        return 0, json.dumps(mgr.SSO_DB.saml2.to_dict()), ''
    # Only 'dashboard sso setup saml2' remains at this point.
    return _handle_saml_setup(cmd)
def _is_sso_configured():
    """Return True if the stored SAML2 settings validate with python3-saml."""
    try:
        Saml2Settings(mgr.SSO_DB.saml2.onelogin_settings)
    except Saml2Error:
        return False
    return True
def _handle_saml_setup(cmd):
    """Validate the SP certificate input and persist the SAML2 configuration.

    :return: ``(retcode, stdout, stderr)`` triple; on success stdout carries
             the resulting onelogin settings as JSON.
    """
    err, sp_x_509_cert, sp_private_key, has_sp_cert = _read_saml_files(cmd)
    if err:
        return -errno.EINVAL, '', err
    _set_saml_settings(cmd, sp_x_509_cert, sp_private_key, has_sp_cert)
    return 0, json.dumps(mgr.SSO_DB.saml2.onelogin_settings), ''
def _read_saml_files(cmd):
    """Read the optional SP certificate/key pair referenced by the command.

    Both ``sp_x_509_cert`` and ``sp_private_key`` must be supplied together.

    :return: tuple ``(error message or falsy, certificate contents,
             private key contents, whether a cert/key pair was supplied)``.
    """
    sp_x_509_cert_path = _get_optional_attr(cmd, 'sp_x_509_cert', '')
    sp_private_key_path = _get_optional_attr(cmd, 'sp_private_key', '')
    has_sp_cert = sp_x_509_cert_path != "" and sp_private_key_path != ""
    sp_x_509_cert = ''
    sp_private_key = ''
    err = None
    if sp_x_509_cert_path and not sp_private_key_path:
        err = 'Missing parameter `sp_private_key`.'
    elif not sp_x_509_cert_path and sp_private_key_path:
        err = 'Missing parameter `sp_x_509_cert`.'
    elif has_sp_cert:
        sp_x_509_cert, cert_err = _try_read_file(sp_x_509_cert_path)
        sp_private_key, key_err = _try_read_file(sp_private_key_path)
        # BUG FIX: previously the key-file result overwrote ``err``, so a
        # missing certificate file was silently ignored when the key file
        # existed. Report the first failure instead.
        err = cert_err or key_err
    return err, sp_x_509_cert, sp_private_key, has_sp_cert
def _try_read_file(path):
res = ""
ret = ""
try:
with open(path, 'r', encoding='utf-8') as f:
res = f.read()
except FileNotFoundError:
ret = '`{}` not found.'.format(path)
return res, ret
def _set_saml_settings(cmd, sp_x_509_cert, sp_private_key, has_sp_cert):
    """Build the python3-saml settings from the CLI input and persist them.

    :param cmd: parsed 'sso setup saml2' command dict.
    :param sp_x_509_cert: SP certificate contents ('' if none supplied).
    :param sp_private_key: SP private key contents ('' if none supplied).
    :param has_sp_cert: whether a cert/key pair was supplied; enables the
        signing/encryption security options below.
    """
    ceph_dashboard_base_url = cmd['ceph_dashboard_base_url']
    idp_metadata = cmd['idp_metadata']
    idp_username_attribute = _get_optional_attr(
        cmd, 'idp_username_attribute', 'uid')
    idp_entity_id = _get_optional_attr(cmd, 'idp_entity_id', None)
    idp_settings = _parse_saml_settings(idp_metadata, idp_entity_id)
    # Honor a configured URL prefix so SP endpoints match the dashboard URLs.
    url_prefix = prepare_url_prefix(
        mgr.get_module_option('url_prefix', default=''))
    settings = {
        'sp': {
            'entityId': '{}{}/auth/saml2/metadata'.format(ceph_dashboard_base_url, url_prefix),
            'assertionConsumerService': {
                'url': '{}{}/auth/saml2'.format(ceph_dashboard_base_url, url_prefix),
                'binding': "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
            },
            'attributeConsumingService': {
                'serviceName': "Ceph Dashboard",
                "serviceDescription": "Ceph Dashboard Service",
                "requestedAttributes": [
                    {
                        "name": idp_username_attribute,
                        "isRequired": True
                    }
                ]
            },
            'singleLogoutService': {
                'url': '{}{}/auth/saml2/logout'.format(ceph_dashboard_base_url, url_prefix),
                'binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect'
            },
            "x509cert": sp_x_509_cert,
            "privateKey": sp_private_key
        },
        'security': {
            # Signing/encryption is only enabled when an SP cert/key pair
            # was supplied by the caller.
            "nameIdEncrypted": has_sp_cert,
            "authnRequestsSigned": has_sp_cert,
            "logoutRequestSigned": has_sp_cert,
            "logoutResponseSigned": has_sp_cert,
            "signMetadata": has_sp_cert,
            "wantMessagesSigned": has_sp_cert,
            "wantAssertionsSigned": has_sp_cert,
            "wantAssertionsEncrypted": has_sp_cert,
            # Not all Identity Providers support this.
            "wantNameIdEncrypted": False,
            "metadataValidUntil": '',
            "wantAttributeStatement": False
        }
    }
    # Merge in the IdP-side settings obtained from the parsed metadata.
    settings = Saml2Parser.merge_settings(settings, idp_settings)
    mgr.SSO_DB.saml2.onelogin_settings = settings
    mgr.SSO_DB.protocol = 'saml2'
    mgr.SSO_DB.save()
def _parse_saml_settings(idp_metadata, idp_entity_id):
    """Parse IdP metadata given as a local file path, a URL, or inline XML."""
    if os.path.isfile(idp_metadata):
        # Bare local paths still work but are deprecated in favor of file://.
        warnings.warn(
            "Please prepend 'file://' to indicate a local SAML2 IdP file", DeprecationWarning)
        with open(idp_metadata, 'r', encoding='utf-8') as f:
            idp_settings = Saml2Parser.parse(f.read(), entity_id=idp_entity_id)
    elif parse.urlparse(idp_metadata)[0] in ('http', 'https', 'file'):
        idp_settings = Saml2Parser.parse_remote(
            url=idp_metadata, validate_cert=False, entity_id=idp_entity_id)
    else:
        # Neither a file nor a URL: assume the metadata XML was passed inline.
        idp_settings = Saml2Parser.parse(idp_metadata, entity_id=idp_entity_id)
    return idp_settings
| 9,765 | 32.217687 | 99 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/services/tcmu_service.py
|
from mgr_util import get_most_recent_rate
from dashboard.services.ceph_service import CephService
from .. import mgr
try:
from typing import Dict
except ImportError:
pass # Just for type checking
SERVICE_TYPE = 'tcmu-runner'
class TcmuService(object):
    """Aggregates tcmu-runner (iSCSI) daemon and image state for the UI."""
    # pylint: disable=too-many-nested-blocks
    # pylint: disable=too-many-branches
    @staticmethod
    def get_iscsi_info():
        """Collect per-daemon and per-image state for all tcmu-runner services.

        The daemon currently holding an image's exclusive lock is recorded as
        its optimized path and its perf counters provide the image stats.

        :return: dict with sorted 'daemons' and 'images' lists.
        """
        daemons = {}  # type: Dict[str, dict]
        images = {}  # type: Dict[str, dict]
        daemon = None
        for service in CephService.get_service_list(SERVICE_TYPE):
            metadata = service['metadata']
            if metadata is None:
                continue
            status = service['status']
            hostname = service['hostname']
            daemon = daemons.get(hostname, None)
            if daemon is None:
                daemon = {
                    'server_hostname': hostname,
                    'version': metadata['ceph_version'],
                    'optimized_paths': 0,
                    'non_optimized_paths': 0
                }
                daemons[hostname] = daemon
            # Service id format ends with the backing device id.
            service_id = service['id']
            device_id = service_id.split(':')[-1]
            image = images.get(device_id)
            if image is None:
                image = {
                    'device_id': device_id,
                    'pool_name': metadata['pool_name'],
                    'name': metadata['image_name'],
                    'id': metadata.get('image_id', None),
                    'optimized_paths': [],
                    'non_optimized_paths': []
                }
                images[device_id] = image
            if status.get('lock_owner', 'false') == 'true':
                daemon['optimized_paths'] += 1
                image['optimized_paths'].append(hostname)
                perf_key_prefix = "librbd-{id}-{pool}-{name}.".format(
                    id=metadata.get('image_id', ''),
                    pool=metadata['pool_name'],
                    name=metadata['image_name'])
                perf_key = "{}lock_acquired_time".format(perf_key_prefix)
                perf_value = mgr.get_counter('tcmu-runner',
                                             service_id,
                                             perf_key)[perf_key]
                if perf_value:
                    # Counter values are nanoseconds; convert to seconds.
                    lock_acquired_time = perf_value[-1][1] / 1000000000
                else:
                    lock_acquired_time = 0
                # Keep the stats of the most recent lock owner only.
                if lock_acquired_time > image.get('optimized_since', 0):
                    image['optimized_daemon'] = hostname
                    image['optimized_since'] = lock_acquired_time
                    image['stats'] = {}
                    image['stats_history'] = {}
                    for s in ['rd', 'wr', 'rd_bytes', 'wr_bytes']:
                        perf_key = "{}{}".format(perf_key_prefix, s)
                        rates = CephService.get_rates('tcmu-runner', service_id, perf_key)
                        image['stats'][s] = get_most_recent_rate(rates)
                        image['stats_history'][s] = rates
            else:
                daemon['non_optimized_paths'] += 1
                image['non_optimized_paths'].append(hostname)
            # clear up races w/ tcmu-runner clients that haven't detected
            # loss of optimized path
            TcmuService.remove_undetected_clients(images, daemons, daemon)
        return {
            'daemons': sorted(daemons.values(),
                              key=lambda d: d['server_hostname']),
            # BUG FIX: this previously used key=lambda i: ['id'] (a constant
            # list), leaving the image order arbitrary. Sort by image id,
            # treating a missing id (None) as '' to keep the sort total.
            'images': sorted(images.values(), key=lambda i: i['id'] or ''),
        }

    @staticmethod
    def get_image_info(pool_name, image_name, get_iscsi_info):
        """Return the image entry matching pool and name, or None."""
        for image in get_iscsi_info['images']:
            if image['pool_name'] == pool_name and image['name'] == image_name:
                return image
        return None

    @staticmethod
    def remove_undetected_clients(images, daemons, daemon):
        """Reconcile path counters after the lock owner has changed.

        Any daemon still listed as an optimized path for an image whose lock
        is owned by another daemon is moved to the non-optimized set.
        """
        for image in images.values():
            optimized_daemon = image.get('optimized_daemon', None)
            if optimized_daemon:
                for daemon_name in image['optimized_paths']:
                    if daemon_name != optimized_daemon:
                        daemon = daemons[daemon_name]
                        daemon['optimized_paths'] -= 1
                        daemon['non_optimized_paths'] += 1
                        image['non_optimized_paths'].append(daemon_name)
                image['optimized_paths'] = [optimized_daemon]
| 4,535 | 38.789474 | 90 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/__init__.py
|
# -*- coding: utf-8 -*-
# pylint: disable=too-many-arguments
import contextlib
import json
import logging
import threading
import time
from typing import Any, Dict, List, Optional
from unittest import mock
from unittest.mock import Mock
import cherrypy
from cherrypy._cptools import HandlerWrapperTool
from cherrypy.test import helper
from mgr_module import HandleCommandResult
from orchestrator import DaemonDescription, HostSpec, InventoryHost
from pyfakefs import fake_filesystem
from .. import mgr
from ..controllers import generate_controller_routes, json_error_page
from ..controllers._version import APIVersion
from ..module import Module
from ..plugins import PLUGIN_MANAGER, debug, feature_toggles # noqa
from ..services.auth import AuthManagerTool
from ..services.exception import dashboard_exception_handler
from ..tools import RequestLoggingTool
# Register dashboard plugin hooks once at import time so tests exercise
# the same plugin setup as the real module.
PLUGIN_MANAGER.hook.init()
PLUGIN_MANAGER.hook.register_commands()
logger = logging.getLogger('tests')
class ModuleTestClass(Module):
    """Dashboard module subclass for testing the module methods."""
    def __init__(self) -> None:
        # Skip Module.__init__: it boots the real mgr runtime, which is
        # unavailable (and unwanted) in unit tests.
        pass
    def _unconfigure_logging(self) -> None:
        # No-op: keep the logging configuration installed by the test runner.
        pass
class CmdException(Exception):
    """Raised by exec_cmd() when a CLI command returns a negative retcode."""

    def __init__(self, retcode, message):
        super().__init__(message)
        # Negative errno-style return code reported by the command.
        self.retcode = retcode
class KVStoreMockMixin(object):
    """Mix-in replacing the mgr config/KV store with an in-memory dict."""
    # Shared backing store for both module options and the KV store.
    CONFIG_KEY_DICT = {}
    @classmethod
    def mock_set_module_option(cls, attr, val):
        cls.CONFIG_KEY_DICT[attr] = val
    @classmethod
    def mock_get_module_option(cls, attr, default=None):
        return cls.CONFIG_KEY_DICT.get(attr, default)
    @classmethod
    def mock_kv_store(cls):
        # Reset state and route the mgr accessors to the in-memory dict.
        # NOTE(review): assumes ``mgr`` is a mock whose methods accept
        # ``side_effect`` assignment — confirm against the test bootstrap.
        cls.CONFIG_KEY_DICT.clear()
        mgr.set_module_option.side_effect = cls.mock_set_module_option
        mgr.get_module_option.side_effect = cls.mock_get_module_option
        # kludge below: the KV store shares the same dict as module options
        mgr.set_store.side_effect = cls.mock_set_module_option
        mgr.get_store.side_effect = cls.mock_get_module_option
    @classmethod
    def get_key(cls, key):
        return cls.CONFIG_KEY_DICT.get(key, None)
# pylint: disable=protected-access
class CLICommandTestMixin(KVStoreMockMixin):
    """Mix-in to run 'ceph dashboard ...' CLI commands against the module."""
    _dashboard_module = ModuleTestClass()
    @classmethod
    def exec_cmd(cls, cmd, **kwargs):
        """Execute a dashboard CLI command.

        :param cmd: command suffix after 'dashboard ' (e.g. 'ac-role-create').
        :return: parsed JSON output, or the raw stdout if it is not JSON.
        :raises CmdException: if the command returns a negative retcode.
        """
        inbuf = kwargs['inbuf'] if 'inbuf' in kwargs else None
        cmd_dict = {'prefix': 'dashboard {}'.format(cmd)}
        cmd_dict.update(kwargs)
        result = HandleCommandResult(*cls._dashboard_module._handle_command(inbuf, cmd_dict))
        if result.retval < 0:
            raise CmdException(result.retval, result.stderr)
        try:
            return json.loads(result.stdout)
        except ValueError:
            # Not JSON — return the raw output unchanged.
            return result.stdout
class FakeFsMixin(object):
    """Mix-in exposing a pyfakefs in-memory filesystem plus fake open/os."""
    fs = fake_filesystem.FakeFilesystem()
    f_open = fake_filesystem.FakeFileOpen(fs)
    f_os = fake_filesystem.FakeOsModule(fs)
    # Patch target for mock.patch(builtins_open, new=f_open).
    builtins_open = 'builtins.open'
class ControllerTestCase(helper.CPWebCase):
    """Base class for dashboard controller tests.

    Mounts controllers into an embedded CherryPy tree with authentication
    disabled and provides HTTP request helpers plus async-task handling.
    """
    _endpoints_cache = {}
    @classmethod
    def setup_controllers(cls, ctrl_classes, base_url='', cp_config: Dict[str, Any] = None):
        """Mount one or more controller classes under ``base_url``."""
        if not isinstance(ctrl_classes, list):
            ctrl_classes = [ctrl_classes]
        mapper = cherrypy.dispatch.RoutesDispatcher()
        endpoint_list = []
        for ctrl in ctrl_classes:
            ctrl._cp_config = {
                'tools.dashboard_exception_handler.on': True,
                'tools.authenticate.on': False
            }
            if cp_config:
                ctrl._cp_config.update(cp_config)
            inst = ctrl()
            # We need to cache the controller endpoints because
            # BaseController#endpoints method is not idempontent
            # and a controller might be needed by more than one
            # unit test.
            if ctrl not in cls._endpoints_cache:
                ctrl_endpoints = ctrl.endpoints()
                cls._endpoints_cache[ctrl] = ctrl_endpoints
            ctrl_endpoints = cls._endpoints_cache[ctrl]
            for endpoint in ctrl_endpoints:
                endpoint.inst = inst
                endpoint_list.append(endpoint)
        endpoint_list = sorted(endpoint_list, key=lambda e: e.url)
        for endpoint in endpoint_list:
            generate_controller_routes(endpoint, mapper, base_url)
        if base_url == '':
            base_url = '/'
        cherrypy.tree.mount(None, config={
            base_url: {'request.dispatch': mapper}})
    @classmethod
    def setup_crud_controllers(cls, crud_ctrl_classes, base_url='',
                               cp_config: Dict[str, Any] = None):
        """Mount CRUD controller pairs (resource class + metadata class)."""
        if crud_ctrl_classes and not isinstance(crud_ctrl_classes, list):
            crud_ctrl_classes = [crud_ctrl_classes]
        ctrl_classes = []
        for ctrl in crud_ctrl_classes:
            ctrl_classes.append(ctrl.CRUDClass)
            ctrl_classes.append(ctrl.CRUDClassMetadata)
        cls.setup_controllers(ctrl_classes, base_url=base_url, cp_config=cp_config)
    # Subclasses flip this on to install the request-logging tool.
    _request_logging = False
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cherrypy.tools.authenticate = AuthManagerTool()
        cherrypy.tools.dashboard_exception_handler = HandlerWrapperTool(dashboard_exception_handler,
                                                                        priority=31)
        cherrypy.config.update({
            'error_page.default': json_error_page,
            'tools.json_in.on': True,
            'tools.json_in.force': False
        })
        PLUGIN_MANAGER.hook.configure_cherrypy(config=cherrypy.config)
        if cls._request_logging:
            cherrypy.tools.request_logging = RequestLoggingTool()
            cherrypy.config.update({'tools.request_logging.on': True})
    @classmethod
    def tearDownClass(cls):
        if cls._request_logging:
            cherrypy.config.update({'tools.request_logging.on': False})
    def _request(self, url, method, data=None, headers=None, version=APIVersion.DEFAULT):
        """Issue an HTTP request, JSON-encoding ``data`` and setting headers."""
        if not data:
            b = None
            if version:
                h = [('Accept', version.to_mime_type()),
                     ('Content-Length', '0')]
            else:
                h = None
        else:
            b = json.dumps(data)
            if version is not None:
                h = [('Accept', version.to_mime_type()),
                     ('Content-Type', 'application/json'),
                     ('Content-Length', str(len(b)))]
            else:
                h = [('Content-Type', 'application/json'),
                     ('Content-Length', str(len(b)))]
        if headers:
            # Caller-supplied headers replace the generated ones entirely.
            h = headers
        self.getPage(url, method=method, body=b, headers=h)
    def _get(self, url, headers=None, version=APIVersion.DEFAULT):
        self._request(url, 'GET', headers=headers, version=version)
    def _post(self, url, data=None, version=APIVersion.DEFAULT):
        self._request(url, 'POST', data, version=version)
    def _delete(self, url, data=None, version=APIVersion.DEFAULT):
        self._request(url, 'DELETE', data, version=version)
    def _put(self, url, data=None, version=APIVersion.DEFAULT):
        self._request(url, 'PUT', data, version=version)
    def _task_request(self, method, url, data, timeout, version=APIVersion.DEFAULT):
        """Issue a request that may return 202 Accepted and await the task."""
        self._request(url, method, data, version=version)
        if self.status != '202 Accepted':
            logger.info("task finished immediately")
            return
        res = self.json_body()
        self.assertIsInstance(res, dict)
        self.assertIn('name', res)
        self.assertIn('metadata', res)
        task_name = res['name']
        task_metadata = res['metadata']
        # Poll the task endpoint on a background thread until it finishes.
        thread = Waiter(task_name, task_metadata, self, version)
        thread.start()
        status = thread.ev.wait(timeout)
        if not status:
            # timeout expired
            thread.abort = True
            thread.join()
            raise Exception("Waiting for task ({}, {}) to finish timed out"
                            .format(task_name, task_metadata))
        logger.info("task (%s, %s) finished", task_name, task_metadata)
        if thread.res_task['success']:
            self.body = json.dumps(thread.res_task['ret_value'])
            self._set_success_status(method)
        else:
            if 'status' in thread.res_task['exception']:
                self.status = thread.res_task['exception']['status']
            else:
                # NOTE(review): set as int while other statuses are strings
                # like '200 OK' — confirm this is intended.
                self.status = 500
            self.body = json.dumps(thread.res_task['exception'])
    def _set_success_status(self, method):
        # Map the HTTP method to the status a synchronous success would yield.
        if method == 'POST':
            self.status = '201 Created'
        elif method == 'PUT':
            self.status = '200 OK'
        elif method == 'DELETE':
            self.status = '204 No Content'
    def _task_post(self, url, data=None, timeout=60, version=APIVersion.DEFAULT):
        self._task_request('POST', url, data, timeout, version=version)
    def _task_delete(self, url, timeout=60, version=APIVersion.DEFAULT):
        self._task_request('DELETE', url, None, timeout, version=version)
    def _task_put(self, url, data=None, timeout=60, version=APIVersion.DEFAULT):
        self._task_request('PUT', url, data, timeout, version=version)
    def json_body(self):
        """Decode the last response body as JSON."""
        body_str = self.body.decode('utf-8') if isinstance(self.body, bytes) else self.body
        return json.loads(body_str)
    def assertJsonBody(self, data, msg=None):  # noqa: N802
        """Fail if value != self.body."""
        json_body = self.json_body()
        if data != json_body:
            if msg is None:
                msg = 'expected body:\n%r\n\nactual body:\n%r' % (
                    data, json_body)
            self._handlewebError(msg)
    def assertInJsonBody(self, data, msg=None):  # noqa: N802
        """Fail unless ``data`` is contained in the JSON response body."""
        json_body = self.json_body()
        if data not in json_body:
            if msg is None:
                msg = 'expected %r to be in %r' % (data, json_body)
            self._handlewebError(msg)
class Stub:
    """Test class for returning predefined values"""
    @classmethod
    def get_mgr_no_services(cls):
        # Make mgr.get() report an empty cluster map (no services at all).
        mgr.get = Mock(return_value={})
class RgwStub(Stub):
    """Stubs mgr state with two fake RGW daemons and their API settings."""
    @classmethod
    def get_daemons(cls):
        # Two daemons in distinct realms/zonegroups/zones; the second uses
        # an IPv6 address and a civetweb frontend.
        mgr.get = Mock(return_value={'services': {'rgw': {'daemons': {
            '5297': {
                'addr': '192.168.178.3:49774/1534999298',
                'metadata': {
                    'frontend_config#0': 'beast port=8000',
                    'id': 'daemon1',
                    'realm_name': 'realm1',
                    'zonegroup_name': 'zonegroup1',
                    'zone_name': 'zone1',
                    'hostname': 'daemon1.server.lan'
                }
            },
            '5398': {
                'addr': '[2001:db8:85a3::8a2e:370:7334]:49774/1534999298',
                'metadata': {
                    'frontend_config#0': 'civetweb port=8002',
                    'id': 'daemon2',
                    'realm_name': 'realm2',
                    'zonegroup_name': 'zonegroup2',
                    'zone_name': 'zone2',
                    'hostname': 'daemon2.server.lan'
                }
            }
        }}}})
    @classmethod
    def get_settings(cls):
        # Route module-option lookups to a fixed credentials dict.
        settings = {
            'RGW_API_ACCESS_KEY': 'fake-access-key',
            'RGW_API_SECRET_KEY': 'fake-secret-key',
        }
        mgr.get_module_option = Mock(side_effect=settings.get)
# pylint: disable=protected-access
class Waiter(threading.Thread):
    """Background poller that waits for an async dashboard task to finish."""
    def __init__(self, task_name, task_metadata, tc, version):
        super(Waiter, self).__init__()
        self.task_name = task_name
        self.task_metadata = task_metadata
        # Signaled once the matching task appears in 'finished_tasks'.
        self.ev = threading.Event()
        # Set by the owner to stop polling (e.g. on timeout).
        self.abort = False
        self.res_task = None
        self.tc = tc
        self.version = version
    def run(self):
        running = True
        while running and not self.abort:
            logger.info("task (%s, %s) is still executing", self.task_name,
                        self.task_metadata)
            time.sleep(1)
            # Poll the task-list endpoint until our task is reported done.
            self.tc._get('/api/task?name={}'.format(self.task_name), version=self.version)
            res = self.tc.json_body()
            for task in res['finished_tasks']:
                if task['metadata'] == self.task_metadata:
                    # task finished
                    running = False
                    self.res_task = task
                    self.ev.set()
@contextlib.contextmanager
def patch_orch(available: bool, missing_features: Optional[List[str]] = None,
               hosts: Optional[List[HostSpec]] = None,
               inventory: Optional[List[dict]] = None,
               daemons: Optional[List[DaemonDescription]] = None):
    """Patch OrchClient.instance with a configurable fake orchestrator.

    Yields the fake client so tests can add expectations or inspect calls.
    """
    with mock.patch('dashboard.controllers.orchestrator.OrchClient.instance') as instance:
        fake_client = mock.Mock()
        fake_client.available.return_value = available
        fake_client.get_missing_features.return_value = missing_features
        if not daemons:
            # Provide a minimal default daemon so callers always see one.
            daemons = [
                DaemonDescription(
                    daemon_type='mon',
                    daemon_id='a',
                    hostname='node0'
                )
            ]
        fake_client.services.list_daemons.return_value = daemons
        if hosts is not None:
            fake_client.hosts.list.return_value = hosts
        if inventory is not None:
            def _list_inventory(hosts=None, refresh=False):  # pylint: disable=unused-argument
                # Filter the provided inventory by the requested host names.
                inv_hosts = []
                for inv_host in inventory:
                    if hosts is None or inv_host['name'] in hosts:
                        inv_hosts.append(InventoryHost.from_json(inv_host))
                return inv_hosts
            fake_client.inventory.list.side_effect = _list_inventory
        instance.return_value = fake_client
        yield fake_client
| 13,890 | 34.436224 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/helper.py
|
# -*- coding: utf-8 -*-
try:
from typing import Any, Dict
except ImportError:
pass
def update_dict(data, update_data):
    # type: (Dict[Any, Any], Dict[Any, Any]) -> Dict[Any, Any]
    """ Update a dictionary recursively.

    Eases doing so by providing the option to separate the key to be updated by dot characters.  If
    a key provided does not exist, it will raise an KeyError instead of just updating the
    dictionary.

    Limitations

    Please note that the functionality provided by this method can only be used if the dictionary to
    be updated (`data`) does not contain dot characters in its keys.

    :raises KeyError:

    >>> update_dict({'foo': {'bar': 5}}, {'foo.bar': 10})
    {'foo': {'bar': 10}}

    >>> update_dict({'foo': {'bar': 5}}, {'xyz': 10})
    Traceback (most recent call last):
        ...
    KeyError: 'xyz'

    >>> update_dict({'foo': {'bar': 5}}, {'foo.xyz': 10})
    Traceback (most recent call last):
        ...
    KeyError: 'xyz'
    """
    for k, v in update_data.items():
        keys = k.split('.')
        element = None
        for i, key in enumerate(keys):
            last = len(keys) == i + 1
            # BUG FIX: compare against None explicitly. The previous
            # truthiness check ('if not element') meant that a falsy
            # intermediate value (e.g. an empty dict) caused the next
            # lookup to run against the top-level ``data`` again,
            # silently updating the wrong key instead of raising.
            if element is None:
                element = data[key]
            elif not last:
                element = element[key]  # pylint: disable=unsubscriptable-object
            if last:
                if key not in element:
                    raise KeyError(key)
                element[key] = v
    return data
| 1,534 | 26.410714 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_access_control.py
|
# -*- coding: utf-8 -*-
# pylint: disable=dangerous-default-value,too-many-public-methods
import errno
import json
import tempfile
import time
import unittest
from datetime import datetime, timedelta
from mgr_module import ERROR_MSG_EMPTY_INPUT_FILE
from .. import mgr
from ..security import Permission, Scope
from ..services.access_control import SYSTEM_ROLES, AccessControlDB, \
PasswordPolicy, load_access_control_db, password_hash
from ..settings import Settings
from ..tests import CLICommandTestMixin, CmdException
class AccessControlTest(unittest.TestCase, CLICommandTestMixin):
@classmethod
def setUpClass(cls):
cls.mock_kv_store()
mgr.ACCESS_CONTROL_DB = None
def setUp(self):
self.CONFIG_KEY_DICT.clear()
load_access_control_db()
def load_persistent_db(self):
config_key = AccessControlDB.accessdb_config_key()
self.assertIn(config_key, self.CONFIG_KEY_DICT)
db_json = self.CONFIG_KEY_DICT[config_key]
db = json.loads(db_json)
return db
# The DB is written to persistent storage the first time it is saved.
# However, should an operation fail due to <reasons>, we may end up in
# a state where we have a completely empty CONFIG_KEY_DICT (our mock
# equivalent to the persistent state). While this works for most of the
# tests in this class, that would prevent us from testing things like
# "run a command that is expected to fail, and then ensure nothing
# happened", because we'd be asserting in `load_persistent_db()` due to
# the map being empty.
#
# This function will therefore force state to be written to our mock
# persistent state. We could have added this extra step to
# `load_persistent_db()` directly, but that would conflict with the
# upgrade tests. This way, we can selectively enforce this requirement
# where we believe it to be necessary; generically speaking, this should
# not be needed unless we're testing very specific behaviors.
#
def setup_and_load_persistent_db(self):
mgr.ACCESS_CTRL_DB.save()
self.load_persistent_db()
def validate_persistent_role(self, rolename, scopes_permissions,
description=None):
db = self.load_persistent_db()
self.assertIn('roles', db)
self.assertIn(rolename, db['roles'])
self.assertEqual(db['roles'][rolename]['name'], rolename)
self.assertEqual(db['roles'][rolename]['description'], description)
self.assertDictEqual(db['roles'][rolename]['scopes_permissions'],
scopes_permissions)
def validate_persistent_no_role(self, rolename):
db = self.load_persistent_db()
self.assertIn('roles', db)
self.assertNotIn(rolename, db['roles'])
def validate_persistent_user(self, username, roles, password=None,
name=None, email=None, last_update=None,
enabled=True, pwdExpirationDate=None):
db = self.load_persistent_db()
self.assertIn('users', db)
self.assertIn(username, db['users'])
self.assertEqual(db['users'][username]['username'], username)
self.assertListEqual(db['users'][username]['roles'], roles)
if password:
self.assertEqual(db['users'][username]['password'], password)
if name:
self.assertEqual(db['users'][username]['name'], name)
if email:
self.assertEqual(db['users'][username]['email'], email)
if last_update:
self.assertEqual(db['users'][username]['lastUpdate'], last_update)
if pwdExpirationDate:
self.assertEqual(db['users'][username]['pwdExpirationDate'], pwdExpirationDate)
self.assertEqual(db['users'][username]['enabled'], enabled)
def validate_persistent_no_user(self, username):
db = self.load_persistent_db()
self.assertIn('users', db)
self.assertNotIn(username, db['users'])
def test_create_role(self):
role = self.exec_cmd('ac-role-create', rolename='test_role')
self.assertDictEqual(role, {'name': 'test_role', 'description': None,
'scopes_permissions': {}})
self.validate_persistent_role('test_role', {})
def test_create_role_with_desc(self):
role = self.exec_cmd('ac-role-create', rolename='test_role',
description='Test Role')
self.assertDictEqual(role, {'name': 'test_role',
'description': 'Test Role',
'scopes_permissions': {}})
self.validate_persistent_role('test_role', {}, 'Test Role')
def test_create_duplicate_role(self):
self.test_create_role()
with self.assertRaises(CmdException) as ctx:
self.exec_cmd('ac-role-create', rolename='test_role')
self.assertEqual(ctx.exception.retcode, -errno.EEXIST)
self.assertEqual(str(ctx.exception), "Role 'test_role' already exists")
def test_delete_role(self):
self.test_create_role()
out = self.exec_cmd('ac-role-delete', rolename='test_role')
self.assertEqual(out, "Role 'test_role' deleted")
self.validate_persistent_no_role('test_role')
def test_delete_nonexistent_role(self):
with self.assertRaises(CmdException) as ctx:
self.exec_cmd('ac-role-delete', rolename='test_role')
self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
self.assertEqual(str(ctx.exception), "Role 'test_role' does not exist")
def test_show_single_role(self):
self.test_create_role()
role = self.exec_cmd('ac-role-show', rolename='test_role')
self.assertDictEqual(role, {'name': 'test_role', 'description': None,
'scopes_permissions': {}})
def test_show_nonexistent_role(self):
with self.assertRaises(CmdException) as ctx:
self.exec_cmd('ac-role-show', rolename='test_role')
self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
self.assertEqual(str(ctx.exception), "Role 'test_role' does not exist")
def test_show_system_roles(self):
roles = self.exec_cmd('ac-role-show')
self.assertEqual(len(roles), len(SYSTEM_ROLES))
for role in roles:
self.assertIn(role, SYSTEM_ROLES)
def test_show_system_role(self):
role = self.exec_cmd('ac-role-show', rolename="read-only")
self.assertEqual(role['name'], 'read-only')
self.assertEqual(
role['description'],
'allows read permission for all security scope except dashboard settings and config-opt'
)
def test_delete_system_role(self):
with self.assertRaises(CmdException) as ctx:
self.exec_cmd('ac-role-delete', rolename='administrator')
self.assertEqual(ctx.exception.retcode, -errno.EPERM)
self.assertEqual(str(ctx.exception),
"Cannot delete system role 'administrator'")
def test_add_role_scope_perms(self):
self.test_create_role()
self.exec_cmd('ac-role-add-scope-perms', rolename='test_role',
scopename=Scope.POOL,
permissions=[Permission.READ, Permission.DELETE])
role = self.exec_cmd('ac-role-show', rolename='test_role')
self.assertDictEqual(role, {'name': 'test_role',
'description': None,
'scopes_permissions': {
Scope.POOL: [Permission.DELETE,
Permission.READ]
}})
self.validate_persistent_role('test_role', {
Scope.POOL: [Permission.DELETE, Permission.READ]
})
def test_del_role_scope_perms(self):
self.test_add_role_scope_perms()
self.exec_cmd('ac-role-add-scope-perms', rolename='test_role',
scopename=Scope.MONITOR,
permissions=[Permission.READ, Permission.CREATE])
self.validate_persistent_role('test_role', {
Scope.POOL: [Permission.DELETE, Permission.READ],
Scope.MONITOR: [Permission.CREATE, Permission.READ]
})
self.exec_cmd('ac-role-del-scope-perms', rolename='test_role',
scopename=Scope.POOL)
role = self.exec_cmd('ac-role-show', rolename='test_role')
self.assertDictEqual(role, {'name': 'test_role',
'description': None,
'scopes_permissions': {
Scope.MONITOR: [Permission.CREATE,
Permission.READ]
}})
self.validate_persistent_role('test_role', {
Scope.MONITOR: [Permission.CREATE, Permission.READ]
})
    # --- Negative paths for 'ac-role-{add,del}-scope-perms' ---
    # Adding scope permissions to a role that was never created fails
    # with ENOENT.
    def test_add_role_scope_perms_nonexistent_role(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-role-add-scope-perms', rolename='test_role',
                          scopename='pool',
                          permissions=['read', 'delete'])
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception), "Role 'test_role' does not exist")
    # An unknown scope name is rejected with EINVAL; the message lists the
    # valid scopes.
    def test_add_role_invalid_scope_perms(self):
        self.test_create_role()
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-role-add-scope-perms', rolename='test_role',
                          scopename='invalidscope',
                          permissions=['read', 'delete'])
        self.assertEqual(ctx.exception.retcode, -errno.EINVAL)
        self.assertEqual(str(ctx.exception),
                         "Scope 'invalidscope' is not valid\n Possible values: "
                         "{}".format(Scope.all_scopes()))
    # An unknown permission is rejected with EINVAL; the message lists the
    # valid permissions.
    def test_add_role_scope_invalid_perms(self):
        self.test_create_role()
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-role-add-scope-perms', rolename='test_role',
                          scopename='pool', permissions=['invalidperm'])
        self.assertEqual(ctx.exception.retcode, -errno.EINVAL)
        self.assertEqual(str(ctx.exception),
                         "Permission 'invalidperm' is not valid\n Possible "
                         "values: {}".format(Permission.all_permissions()))
    # Deleting scope permissions from a nonexistent role fails with ENOENT.
    def test_del_role_scope_perms_nonexistent_role(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-role-del-scope-perms', rolename='test_role',
                          scopename='pool')
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception), "Role 'test_role' does not exist")
    # Deleting a scope the role never had fails with ENOENT.
    def test_del_role_nonexistent_scope_perms(self):
        self.test_add_role_scope_perms()
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-role-del-scope-perms', rolename='test_role',
                          scopename='nonexistentscope')
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception),
                         "There are no permissions for scope 'nonexistentscope' "
                         "in role 'test_role'")
    # Built-in (system) roles are immutable: adding perms fails with EPERM.
    def test_not_permitted_add_role_scope_perms(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-role-add-scope-perms', rolename='read-only',
                          scopename='pool', permissions=['read', 'delete'])
        self.assertEqual(ctx.exception.retcode, -errno.EPERM)
        self.assertEqual(str(ctx.exception),
                         "Cannot update system role 'read-only'")
    # Likewise, scope permissions cannot be removed from a system role.
    def test_not_permitted_del_role_scope_perms(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-role-del-scope-perms', rolename='read-only',
                          scopename='pool')
        self.assertEqual(ctx.exception.retcode, -errno.EPERM)
        self.assertEqual(str(ctx.exception),
                         "Cannot update system role 'read-only'")
    # Factory/assertion helper: creates a user (password 'admin'), checks the
    # returned record against expectations and against the persistent store,
    # then returns the record. Reused as a fixture by many tests below.
    def test_create_user(self, username='admin', rolename=None, enabled=True,
                         pwdExpirationDate=None):
        user = self.exec_cmd('ac-user-create', username=username,
                             rolename=rolename, inbuf='admin',
                             name='{} User'.format(username),
                             email='{}@user.com'.format(username),
                             enabled=enabled, force_password=True,
                             pwd_expiration_date=pwdExpirationDate)
        pass_hash = password_hash('admin', user['password'])
        self.assertDictEqual(user, {
            'username': username,
            'password': pass_hash,
            'pwdExpirationDate': pwdExpirationDate,
            'pwdUpdateRequired': False,
            'lastUpdate': user['lastUpdate'],
            'name': '{} User'.format(username),
            'email': '{}@user.com'.format(username),
            'roles': [rolename] if rolename else [],
            'enabled': enabled
        })
        self.validate_persistent_user(username, [rolename] if rolename else [],
                                      pass_hash, '{} User'.format(username),
                                      '{}@user.com'.format(username),
                                      user['lastUpdate'], enabled)
        return user
    # A user can be created in the disabled state.
    def test_create_disabled_user(self):
        self.test_create_user(enabled=False)
    # A future expiration timestamp is stored verbatim.
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
    # consider datetime.now(timezone.utc) — verify timetuple()/mktime usage.
    def test_create_user_pwd_expiration_date(self):
        expiration_date = datetime.utcnow() + timedelta(days=10)
        expiration_date = int(time.mktime(expiration_date.timetuple()))
        self.test_create_user(pwdExpirationDate=expiration_date)
    # Creation with a custom role (created via test_add_role_scope_perms).
    def test_create_user_with_role(self):
        self.test_add_role_scope_perms()
        self.test_create_user(rolename='test_role')
    # Creation with a built-in system role.
    def test_create_user_with_system_role(self):
        self.test_create_user(rolename='administrator')
    # Deletion removes the user from both the command output and the
    # persistent store.
    def test_delete_user(self):
        self.test_create_user()
        out = self.exec_cmd('ac-user-delete', username='admin')
        self.assertEqual(out, "User 'admin' deleted")
        users = self.exec_cmd('ac-user-show')
        self.assertEqual(len(users), 0)
        self.validate_persistent_no_user('admin')
    # Re-creating an existing user reports the duplicate instead of failing.
    def test_create_duplicate_user(self):
        self.test_create_user()
        ret = self.exec_cmd('ac-user-create', username='admin', inbuf='admin',
                            force_password=True)
        self.assertEqual(ret, "User 'admin' already exists")
    # Failed user creation (unknown role) must not leave partial state behind,
    # and a later unrelated commit must not flush that dirty state either.
    def test_create_users_with_dne_role(self):
        # one time call to setup our persistent db
        self.setup_and_load_persistent_db()
        # create a user with a role that does not exist; expect a failure
        try:
            self.exec_cmd('ac-user-create', username='foo',
                          rolename='dne_role', inbuf='foopass',
                          name='foo User', email='[email protected]',
                          force_password=True)
        except CmdException as e:
            self.assertEqual(e.retcode, -errno.ENOENT)
        db = self.load_persistent_db()
        if 'users' in db:
            self.assertNotIn('foo', db['users'])
        # We could just finish our test here, given we ensured that the user
        # with a non-existent role is not in persistent storage. However,
        # we're going to test the database's consistency, making sure that
        # side-effects are not written to persistent storage once we commit
        # an unrelated operation. To ensure this, we'll issue another
        # operation that is sharing the same code path, and will check whether
        # the next operation commits dirty state.
        # create a role (this will be 'test_role')
        self.test_create_role()
        self.exec_cmd('ac-user-create', username='bar',
                      rolename='test_role', inbuf='barpass',
                      name='bar User', email='[email protected]',
                      force_password=True)
        # validate db:
        #   user 'foo' should not exist
        #   user 'bar' should exist and have role 'test_role'
        self.validate_persistent_user('bar', ['test_role'])
        db = self.load_persistent_db()
        self.assertIn('users', db)
        self.assertNotIn('foo', db['users'])
    # Deleting an unknown user fails with ENOENT.
    def test_delete_nonexistent_user(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-delete', username='admin')
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception), "User 'admin' does not exist")
def test_add_user_roles(self, username='admin',
roles=['pool-manager', 'block-manager']):
user_orig = self.test_create_user(username)
uroles = []
for role in roles:
uroles.append(role)
uroles.sort()
user = self.exec_cmd('ac-user-add-roles', username=username,
roles=[role])
self.assertLessEqual(uroles, user['roles'])
self.validate_persistent_user(username, uroles)
self.assertGreaterEqual(user['lastUpdate'], user_orig['lastUpdate'])
    # Adding several roles in a single command; results are sorted.
    def test_add_user_roles2(self):
        user_orig = self.test_create_user()
        user = self.exec_cmd('ac-user-add-roles', username="admin",
                             roles=['pool-manager', 'block-manager'])
        self.assertLessEqual(['block-manager', 'pool-manager'],
                             user['roles'])
        self.validate_persistent_user('admin', ['block-manager',
                                                'pool-manager'])
        self.assertGreaterEqual(user['lastUpdate'], user_orig['lastUpdate'])
    # Adding roles to an unknown user fails with ENOENT.
    def test_add_user_roles_not_existent_user(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-add-roles', username="admin",
                          roles=['pool-manager', 'block-manager'])
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception), "User 'admin' does not exist")
    # Adding an unknown role fails with ENOENT.
    def test_add_user_roles_not_existent_role(self):
        self.test_create_user()
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-add-roles', username="admin",
                          roles=['Invalid Role'])
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception),
                         "Role 'Invalid Role' does not exist")
    # 'set-roles' replaces the whole role list (unlike 'add-roles').
    def test_set_user_roles(self):
        user_orig = self.test_create_user()
        user = self.exec_cmd('ac-user-add-roles', username="admin",
                             roles=['pool-manager'])
        self.assertLessEqual(['pool-manager'], user['roles'])
        self.validate_persistent_user('admin', ['pool-manager'])
        self.assertGreaterEqual(user['lastUpdate'], user_orig['lastUpdate'])
        user2 = self.exec_cmd('ac-user-set-roles', username="admin",
                              roles=['rgw-manager', 'block-manager'])
        self.assertLessEqual(['block-manager', 'rgw-manager'],
                             user2['roles'])
        self.validate_persistent_user('admin', ['block-manager',
                                                'rgw-manager'])
        self.assertGreaterEqual(user2['lastUpdate'], user['lastUpdate'])
    # 'set-roles' on an unknown user fails with ENOENT.
    def test_set_user_roles_not_existent_user(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-set-roles', username="admin",
                          roles=['pool-manager', 'block-manager'])
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception), "User 'admin' does not exist")
    # 'set-roles' with an unknown role fails with ENOENT.
    def test_set_user_roles_not_existent_role(self):
        self.test_create_user()
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-set-roles', username="admin",
                          roles=['Invalid Role'])
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception),
                         "Role 'Invalid Role' does not exist")
    # Removing one of two roles leaves the other in place.
    def test_del_user_roles(self):
        self.test_add_user_roles()
        user = self.exec_cmd('ac-user-del-roles', username="admin",
                             roles=['pool-manager'])
        self.assertLessEqual(['block-manager'], user['roles'])
        self.validate_persistent_user('admin', ['block-manager'])
    # 'del-roles' on an unknown user fails with ENOENT.
    def test_del_user_roles_not_existent_user(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-del-roles', username="admin",
                          roles=['pool-manager', 'block-manager'])
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception), "User 'admin' does not exist")
    # 'del-roles' with an unknown role fails with ENOENT.
    def test_del_user_roles_not_existent_role(self):
        self.test_create_user()
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-del-roles', username="admin",
                          roles=['Invalid Role'])
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception),
                         "Role 'Invalid Role' does not exist")
    # Removing a valid role that the user never had fails with ENOENT.
    def test_del_user_roles_not_associated_role(self):
        self.test_create_user()
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-del-roles', username="admin",
                          roles=['rgw-manager'])
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception),
                         "Role 'rgw-manager' is not associated with user "
                         "'admin'")
    # 'ac-user-show <name>' returns the full user record.
    def test_show_user(self):
        self.test_add_user_roles()
        user = self.exec_cmd('ac-user-show', username='admin')
        pass_hash = password_hash('admin', user['password'])
        self.assertDictEqual(user, {
            'username': 'admin',
            'lastUpdate': user['lastUpdate'],
            'password': pass_hash,
            'pwdExpirationDate': None,
            'pwdUpdateRequired': False,
            'name': 'admin User',
            'email': '[email protected]',
            'roles': ['block-manager', 'pool-manager'],
            'enabled': True
        })
    # Showing an unknown user fails with ENOENT.
    def test_show_nonexistent_user(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-show', username='admin')
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception), "User 'admin' does not exist")
    # 'ac-user-show' without a name lists all usernames.
    def test_show_all_users(self):
        self.test_add_user_roles('admin', ['administrator'])
        self.test_add_user_roles('guest', ['read-only'])
        users = self.exec_cmd('ac-user-show')
        self.assertEqual(len(users), 2)
        for user in users:
            self.assertIn(user, ['admin', 'guest'])
    # A role still referenced by a user cannot be deleted (EPERM).
    def test_del_role_associated_with_user(self):
        self.test_create_role()
        self.test_add_user_roles('guest', ['test_role'])
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-role-delete', rolename='test_role')
        self.assertEqual(ctx.exception.retcode, -errno.EPERM)
        self.assertEqual(str(ctx.exception),
                         "Role 'test_role' is still associated with user "
                         "'guest'")
    # Updating name/email leaves password and lastUpdate untouched.
    def test_set_user_info(self):
        user_orig = self.test_create_user()
        user = self.exec_cmd('ac-user-set-info', username='admin',
                             name='Admin Name', email='[email protected]')
        pass_hash = password_hash('admin', user['password'])
        self.assertDictEqual(user, {
            'username': 'admin',
            'password': pass_hash,
            'pwdExpirationDate': None,
            'pwdUpdateRequired': False,
            'name': 'Admin Name',
            'email': '[email protected]',
            'lastUpdate': user['lastUpdate'],
            'roles': [],
            'enabled': True
        })
        self.validate_persistent_user('admin', [], pass_hash, 'Admin Name',
                                      '[email protected]')
        self.assertEqual(user['lastUpdate'], user_orig['lastUpdate'])
    # Setting info on an unknown user fails with ENOENT.
    def test_set_user_info_nonexistent_user(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-set-info', username='admin',
                          name='Admin Name', email='[email protected]')
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception), "User 'admin' does not exist")
    # Changing the password updates both the stored hash and lastUpdate.
    def test_set_user_password(self):
        user_orig = self.test_create_user()
        user = self.exec_cmd('ac-user-set-password', username='admin',
                             inbuf='newpass', force_password=True)
        pass_hash = password_hash('newpass', user['password'])
        self.assertDictEqual(user, {
            'username': 'admin',
            'password': pass_hash,
            'pwdExpirationDate': None,
            'pwdUpdateRequired': False,
            'name': 'admin User',
            'email': '[email protected]',
            'lastUpdate': user['lastUpdate'],
            'roles': [],
            'enabled': True
        })
        self.validate_persistent_user('admin', [], pass_hash, 'admin User',
                                      '[email protected]')
        self.assertGreaterEqual(user['lastUpdate'], user_orig['lastUpdate'])
    # Trailing newline separators appended by editors must be stripped from
    # the password read via inbuf; escaped '\\n' sequences in the password
    # itself are preserved.
    def test_sanitize_password(self):
        self.test_create_user()
        password = 'myPass\\n\\r\\n'
        with tempfile.TemporaryFile(mode='w+') as pwd_file:
            # Add new line separators (like some text editors when a file is saved).
            pwd_file.write('{}{}'.format(password, '\n\r\n\n'))
            pwd_file.seek(0)
            user = self.exec_cmd('ac-user-set-password', username='admin',
                                 inbuf=pwd_file.read(), force_password=True)
        pass_hash = password_hash(password, user['password'])
        self.assertEqual(user['password'], pass_hash)
    # Setting a password on an unknown user fails with ENOENT.
    def test_set_user_password_nonexistent_user(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-set-password', username='admin',
                          inbuf='newpass', force_password=True)
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception), "User 'admin' does not exist")
    # An effectively-empty password input is rejected with EINVAL.
    def test_set_user_password_empty(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-set-password', username='admin', inbuf='\n',
                          force_password=True)
        self.assertEqual(ctx.exception.retcode, -errno.EINVAL)
        self.assertIn(ERROR_MSG_EMPTY_INPUT_FILE, str(ctx.exception))
    # A pre-computed bcrypt hash can be installed directly; the fixed hash
    # below corresponds to the password 'newpass' (checked via password_hash).
    def test_set_user_password_hash(self):
        user_orig = self.test_create_user()
        user = self.exec_cmd('ac-user-set-password-hash', username='admin',
                             inbuf='$2b$12$Pt3Vq/rDt2y9glTPSV.VFegiLkQeIpddtkhoFetNApYmIJOY8gau2')
        pass_hash = password_hash('newpass', user['password'])
        self.assertDictEqual(user, {
            'username': 'admin',
            'password': pass_hash,
            'pwdExpirationDate': None,
            'pwdUpdateRequired': False,
            'name': 'admin User',
            'email': '[email protected]',
            'lastUpdate': user['lastUpdate'],
            'roles': [],
            'enabled': True
        })
        self.validate_persistent_user('admin', [], pass_hash, 'admin User',
                                      '[email protected]')
        self.assertGreaterEqual(user['lastUpdate'], user_orig['lastUpdate'])
    # Installing a hash for an unknown user fails with ENOENT.
    def test_set_user_password_hash_nonexistent_user(self):
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-set-password-hash', username='admin',
                          inbuf='$2b$12$Pt3Vq/rDt2y9glTPSV.VFegiLkQeIpddtkhoFetNApYmIJOY8gau2')
        self.assertEqual(ctx.exception.retcode, -errno.ENOENT)
        self.assertEqual(str(ctx.exception), "User 'admin' does not exist")
    # A malformed (non-bcrypt) hash is rejected with EINVAL.
    def test_set_user_password_hash_broken_hash(self):
        self.test_create_user()
        with self.assertRaises(CmdException) as ctx:
            self.exec_cmd('ac-user-set-password-hash', username='admin',
                          inbuf='1')
        self.assertEqual(ctx.exception.retcode, -errno.EINVAL)
        self.assertEqual(str(ctx.exception), 'Invalid password hash')
    # 'set-login-credentials' creates the user if missing, granting the
    # 'administrator' role and leaving name/email unset.
    def test_set_login_credentials(self):
        self.exec_cmd('set-login-credentials', username='admin',
                      inbuf='admin')
        user = self.exec_cmd('ac-user-show', username='admin')
        pass_hash = password_hash('admin', user['password'])
        self.assertDictEqual(user, {
            'username': 'admin',
            'password': pass_hash,
            'pwdExpirationDate': None,
            'pwdUpdateRequired': False,
            'name': None,
            'email': None,
            'lastUpdate': user['lastUpdate'],
            'roles': ['administrator'],
            'enabled': True,
        })
        self.validate_persistent_user('admin', ['administrator'], pass_hash,
                                      None, None)
    # For an existing user only the password changes; roles, name and email
    # are preserved.
    def test_set_login_credentials_for_existing_user(self):
        self.test_add_user_roles('admin', ['read-only'])
        self.exec_cmd('set-login-credentials', username='admin',
                      inbuf='admin2')
        user = self.exec_cmd('ac-user-show', username='admin')
        pass_hash = password_hash('admin2', user['password'])
        self.assertDictEqual(user, {
            'username': 'admin',
            'password': pass_hash,
            'pwdExpirationDate': None,
            'pwdUpdateRequired': False,
            'name': 'admin User',
            'email': '[email protected]',
            'lastUpdate': user['lastUpdate'],
            'roles': ['read-only'],
            'enabled': True
        })
        self.validate_persistent_user('admin', ['read-only'], pass_hash,
                                      'admin User', '[email protected]')
    # A version-1 access DB (no pwdExpirationDate/pwdUpdateRequired/enabled
    # fields) must load and be upgraded transparently: the shown user gains
    # the new fields with their defaults.
    def test_load_v1(self):
        self.CONFIG_KEY_DICT['accessdb_v1'] = '''
            {{
                "users": {{
                    "admin": {{
                        "username": "admin",
                        "password":
                    "$2b$12$sd0Az7mm3FaJl8kN3b/xwOuztaN0sWUwC1SJqjM4wcDw/s5cmGbLK",
                        "roles": ["block-manager", "test_role"],
                        "name": "admin User",
                        "email": "[email protected]",
                        "lastUpdate": {}
                    }}
                }},
                "roles": {{
                    "test_role": {{
                        "name": "test_role",
                        "description": "Test Role",
                        "scopes_permissions": {{
                            "{}": ["{}", "{}"],
                            "{}": ["{}"]
                        }}
                    }}
                }},
                "version": 1
            }}
        '''.format(int(round(time.time())), Scope.ISCSI, Permission.READ,
                   Permission.UPDATE, Scope.POOL, Permission.CREATE)
        load_access_control_db()
        role = self.exec_cmd('ac-role-show', rolename="test_role")
        self.assertDictEqual(role, {
            'name': 'test_role',
            'description': "Test Role",
            'scopes_permissions': {
                Scope.ISCSI: [Permission.READ, Permission.UPDATE],
                Scope.POOL: [Permission.CREATE]
            }
        })
        user = self.exec_cmd('ac-user-show', username="admin")
        self.assertDictEqual(user, {
            'username': 'admin',
            'lastUpdate': user['lastUpdate'],
            'password':
                "$2b$12$sd0Az7mm3FaJl8kN3b/xwOuztaN0sWUwC1SJqjM4wcDw/s5cmGbLK",
            'pwdExpirationDate': None,
            'pwdUpdateRequired': False,
            'name': 'admin User',
            'email': '[email protected]',
            'roles': ['block-manager', 'test_role'],
            'enabled': True
        })
    # A current (version-2) access DB loads as-is; all fields round-trip.
    def test_load_v2(self):
        self.CONFIG_KEY_DICT['accessdb_v2'] = '''
            {{
                "users": {{
                    "admin": {{
                        "username": "admin",
                        "password":
                    "$2b$12$sd0Az7mm3FaJl8kN3b/xwOuztaN0sWUwC1SJqjM4wcDw/s5cmGbLK",
                        "pwdExpirationDate": null,
                        "pwdUpdateRequired": false,
                        "roles": ["block-manager", "test_role"],
                        "name": "admin User",
                        "email": "[email protected]",
                        "lastUpdate": {},
                        "enabled": true
                    }}
                }},
                "roles": {{
                    "test_role": {{
                        "name": "test_role",
                        "description": "Test Role",
                        "scopes_permissions": {{
                            "{}": ["{}", "{}"],
                            "{}": ["{}"]
                        }}
                    }}
                }},
                "version": 2
            }}
        '''.format(int(round(time.time())), Scope.ISCSI, Permission.READ,
                   Permission.UPDATE, Scope.POOL, Permission.CREATE)
        load_access_control_db()
        role = self.exec_cmd('ac-role-show', rolename="test_role")
        self.assertDictEqual(role, {
            'name': 'test_role',
            'description': "Test Role",
            'scopes_permissions': {
                Scope.ISCSI: [Permission.READ, Permission.UPDATE],
                Scope.POOL: [Permission.CREATE]
            }
        })
        user = self.exec_cmd('ac-user-show', username="admin")
        self.assertDictEqual(user, {
            'username': 'admin',
            'lastUpdate': user['lastUpdate'],
            'password':
                "$2b$12$sd0Az7mm3FaJl8kN3b/xwOuztaN0sWUwC1SJqjM4wcDw/s5cmGbLK",
            'pwdExpirationDate': None,
            'pwdUpdateRequired': False,
            'name': 'admin User',
            'email': '[email protected]',
            'roles': ['block-manager', 'test_role'],
            'enabled': True
        })
    # --- PasswordPolicy checks ---
    # NOTE(review): these tests toggle global Settings flags and never
    # restore them, so they leak state between tests — verify isolation or
    # add a tearDown that resets the policy settings.
    def test_password_policy_pw_length(self):
        Settings.PWD_POLICY_CHECK_LENGTH_ENABLED = True
        Settings.PWD_POLICY_MIN_LENGTH = 3
        pw_policy = PasswordPolicy('foo')
        self.assertTrue(pw_policy.check_password_length())
    # With the default minimum length, a 3-character password fails.
    def test_password_policy_pw_length_fail(self):
        Settings.PWD_POLICY_CHECK_LENGTH_ENABLED = True
        pw_policy = PasswordPolicy('bar')
        self.assertFalse(pw_policy.check_password_length())
    # Complexity scoring: the following tests pin the expected credit totals
    # for passwords of increasing character-class variety.
    def test_password_policy_credits_too_weak(self):
        Settings.PWD_POLICY_CHECK_COMPLEXITY_ENABLED = True
        pw_policy = PasswordPolicy('foo')
        pw_credits = pw_policy.check_password_complexity()
        self.assertEqual(pw_credits, 3)
    def test_password_policy_credits_weak(self):
        Settings.PWD_POLICY_CHECK_COMPLEXITY_ENABLED = True
        pw_policy = PasswordPolicy('mypassword1')
        pw_credits = pw_policy.check_password_complexity()
        self.assertEqual(pw_credits, 11)
    def test_password_policy_credits_ok(self):
        Settings.PWD_POLICY_CHECK_COMPLEXITY_ENABLED = True
        pw_policy = PasswordPolicy('mypassword1!@')
        pw_credits = pw_policy.check_password_complexity()
        self.assertEqual(pw_credits, 17)
    def test_password_policy_credits_strong(self):
        Settings.PWD_POLICY_CHECK_COMPLEXITY_ENABLED = True
        pw_policy = PasswordPolicy('testpassword0047!@')
        pw_credits = pw_policy.check_password_complexity()
        self.assertEqual(pw_credits, 22)
    def test_password_policy_credits_very_strong(self):
        Settings.PWD_POLICY_CHECK_COMPLEXITY_ENABLED = True
        pw_policy = PasswordPolicy('testpassword#!$!@$')
        pw_credits = pw_policy.check_password_complexity()
        self.assertEqual(pw_credits, 30)
    # Passwords containing entries of the built-in exclusion list are caught.
    def test_password_policy_forbidden_words(self):
        Settings.PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED = True
        pw_policy = PasswordPolicy('!@$testdashboard#!$')
        self.assertTrue(pw_policy.check_if_contains_forbidden_words())
    # Custom comma-separated exclusion list entries are also caught.
    def test_password_policy_forbidden_words_custom(self):
        Settings.PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED = True
        Settings.PWD_POLICY_EXCLUSION_LIST = 'foo,bar'
        pw_policy = PasswordPolicy('foo123bar')
        self.assertTrue(pw_policy.check_if_contains_forbidden_words())
    # Sequential runs such as '123' are detected.
    def test_password_policy_sequential_chars(self):
        Settings.PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED = True
        pw_policy = PasswordPolicy('!@$test123#!$')
        self.assertTrue(pw_policy.check_if_sequential_characters())
    # Repeated runs such as 'ooo' are detected.
    def test_password_policy_repetitive_chars(self):
        Settings.PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED = True
        pw_policy = PasswordPolicy('!@$testfooo#!$')
        self.assertTrue(pw_policy.check_if_repetitive_characters())
    # Passwords embedding the username are rejected.
    def test_password_policy_contain_username(self):
        Settings.PWD_POLICY_CHECK_USERNAME_ENABLED = True
        pw_policy = PasswordPolicy('%admin135)', 'admin')
        self.assertTrue(pw_policy.check_if_contains_username())
    # Re-using the old password is rejected.
    def test_password_policy_is_old_pwd(self):
        Settings.PWD_POLICY_CHECK_OLDPWD_ENABLED = True
        pw_policy = PasswordPolicy('foo', old_password='foo')
        self.assertTrue(pw_policy.check_is_old_password())
| 38,042 | 42.677382 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_api_auditing.py
|
# -*- coding: utf-8 -*-
import json
import re
try:
import mock
except ImportError:
import unittest.mock as mock
from .. import mgr
from ..controllers import RESTController, Router
from ..tests import ControllerTestCase, KVStoreMockMixin
# pylint: disable=W0613
@Router('/foo', secure=False)
class FooResource(RESTController):
    """Minimal REST fixture used to exercise audit logging per HTTP verb.

    Handler bodies are deliberately empty: only the request method, path
    and parameters matter to the auditing code under test.
    """
    def create(self, password):
        pass
    def get(self, key):
        pass
    def delete(self, key):
        pass
    def set(self, key, password, secret_key=None):
        pass
class ApiAuditingTest(ControllerTestCase, KVStoreMockMixin):
    """Checks that mutating REST requests are audited on the cluster log."""
    # Enables request logging in the test server (consumed by the base
    # ControllerTestCase setup).
    _request_logging = True
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([FooResource])
    def setUp(self):
        self.mock_kv_store()
        mgr.cluster_log = mock.Mock()
        mgr.set_module_option('AUDIT_API_ENABLED', True)
        mgr.set_module_option('AUDIT_API_LOG_PAYLOAD', True)
    # Parse the first cluster_log call and verify channel, path, method,
    # user and the JSON-decoded params. Group 1 (the 'from' address) is not
    # asserted since it varies with the test client.
    def _validate_cluster_log_msg(self, path, method, user, params):
        channel, _, msg = mgr.cluster_log.call_args_list[0][0]
        self.assertEqual(channel, 'audit')
        pattern = r'^\[DASHBOARD\] from=\'(.+)\' path=\'(.+)\' ' \
                  'method=\'(.+)\' user=\'(.+)\' params=\'(.+)\'$'
        m = re.match(pattern, msg)
        self.assertEqual(m.group(2), path)
        self.assertEqual(m.group(3), method)
        self.assertEqual(m.group(4), user)
        self.assertDictEqual(json.loads(m.group(5)), params)
    # Nothing is logged when auditing is disabled.
    def test_no_audit(self):
        mgr.set_module_option('AUDIT_API_ENABLED', False)
        self._delete('/foo/test1')
        mgr.cluster_log.assert_not_called()
    # When payload logging is off, the message carries no 'params=' field.
    def test_no_payload(self):
        mgr.set_module_option('AUDIT_API_LOG_PAYLOAD', False)
        self._delete('/foo/test1')
        _, _, msg = mgr.cluster_log.call_args_list[0][0]
        self.assertNotIn('params=', msg)
    # Read-only GET requests are never audited.
    def test_no_audit_get(self):
        self._get('/foo/test1')
        mgr.cluster_log.assert_not_called()
    # Sensitive parameters (password/secret_key) are masked in the log.
    def test_audit_put(self):
        self._put('/foo/test1', {'password': 'y', 'secret_key': 1234})
        mgr.cluster_log.assert_called_once()
        self._validate_cluster_log_msg('/foo/test1', 'PUT', 'None',
                                       {'key': 'test1',
                                        'password': '***',
                                        'secret_key': '***'})
    # The authenticated username (from the JWT) appears in the audit entry.
    def test_audit_post(self):
        with mock.patch('dashboard.services.auth.JwtManager.get_username',
                        return_value='hugo'):
            self._post('/foo?password=1234')
            mgr.cluster_log.assert_called_once()
            self._validate_cluster_log_msg('/foo', 'POST', 'hugo',
                                           {'password': '***'})
    # DELETE requests are audited with their path parameter.
    def test_audit_delete(self):
        self._delete('/foo/test1')
        mgr.cluster_log.assert_called_once()
        self._validate_cluster_log_msg('/foo/test1', 'DELETE',
                                       'None', {'key': 'test1'})
| 2,970 | 30.946237 | 74 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_auth.py
|
import unittest
from unittest.mock import Mock, patch
from .. import mgr
from ..controllers.auth import Auth
from ..services.auth import JwtManager
from ..tests import ControllerTestCase
# Module-level defaults for the shared mgr mock used by the tests below:
# a JWT TTL, a fixed JWT secret, and an access-control DB mock whose
# login-attempt counter reports 1.
mgr.get_module_option.return_value = JwtManager.JWT_TOKEN_TTL
mgr.get_store.return_value = 'jwt_secret'
mgr.ACCESS_CTRL_DB = Mock()
mgr.ACCESS_CTRL_DB.get_attempt.return_value = 1
class JwtManagerTest(unittest.TestCase):
    def test_generate_token_and_decode(self):
        # Re-apply the mgr mock return values locally — presumably to shield
        # against other tests mutating the shared module-level mock; verify.
        mgr.get_module_option.return_value = JwtManager.JWT_TOKEN_TTL
        mgr.get_store.return_value = 'jwt_secret'
        token = JwtManager.gen_token('my-username')
        self.assertIsInstance(token, str)
        self.assertTrue(token)
        # Decoding must round-trip the issuer and username claims.
        decoded_token = JwtManager.decode_token(token)
        self.assertIsInstance(decoded_token, dict)
        self.assertEqual(decoded_token['iss'], 'ceph-dashboard')
        self.assertEqual(decoded_token['username'], 'my-username')
class AuthTest(ControllerTestCase):
    """Endpoint tests for the /api/auth controller."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([Auth])
    # With authentication enforced, an unauthenticated request gets 401.
    def test_request_not_authorized(self):
        self.setup_controllers([Auth], cp_config={'tools.authenticate.on': True})
        self._post('/api/auth/logout')
        self.assertStatus(401)
    @patch('dashboard.controllers.auth.JwtManager.gen_token', Mock(return_value='my-token'))
    @patch('dashboard.controllers.auth.AuthManager.authenticate', Mock(return_value={
        'permissions': {'rgw': ['read']},
        'pwdExpirationDate': 1000000,
        'pwdUpdateRequired': False
    }))
    # A successful login returns 201 with the token and the authenticated
    # user's permission/password metadata.
    def test_login(self):
        self._post('/api/auth', {'username': 'my-user', 'password': 'my-pass'})
        self.assertStatus(201)
        self.assertJsonBody({
            'token': 'my-token',
            'username': 'my-user',
            'permissions': {'rgw': ['read']},
            'pwdExpirationDate': 1000000,
            'sso': False,
            'pwdUpdateRequired': False
        })
    @patch('dashboard.controllers.auth.JwtManager', Mock())
    # Logout responds with the login-page redirect target.
    def test_logout(self):
        self._post('/api/auth/logout')
        self.assertStatus(200)
        self.assertJsonBody({
            'redirect_url': '#/login'
        })
| 2,200 | 31.850746 | 92 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_cache.py
|
import unittest
from ..plugins.ttl_cache import CacheManager, TTLCache
class TTLCacheTest(unittest.TestCase):
    """Unit tests for TTLCache hit/miss/expiry/eviction accounting."""
    # A miss raises KeyError and bumps 'misses'; a subsequent hit bumps
    # 'hits' and returns the stored value.
    def test_get(self):
        ref = 'testcache'
        cache = TTLCache(ref, 30)
        with self.assertRaises(KeyError):
            val = cache['foo']
        cache['foo'] = 'var'
        val = cache['foo']
        self.assertEqual(val, 'var')
        self.assertEqual(cache.hits, 1)
        self.assertEqual(cache.misses, 1)
    # With a near-zero TTL the entry has expired by the time it is read:
    # the lookup raises KeyError and counts as both a miss and an expiry.
    def test_ttl(self):
        ref = 'testcache'
        cache = TTLCache(ref, 0.0000001)
        cache['foo'] = 'var'
        # pylint: disable=pointless-statement
        with self.assertRaises(KeyError):
            cache['foo']
        self.assertEqual(cache.hits, 0)
        self.assertEqual(cache.misses, 1)
        self.assertEqual(cache.expired, 1)
    # Inserting a third entry into a maxsize-2 cache evicts the oldest
    # (FIFO), so reading it afterwards is a miss.
    def test_maxsize_fifo(self):
        ref = 'testcache'
        cache = TTLCache(ref, 30, 2)
        cache['foo0'] = 'var0'
        cache['foo1'] = 'var1'
        cache['foo2'] = 'var2'
        # pylint: disable=pointless-statement
        with self.assertRaises(KeyError):
            cache['foo0']
        self.assertEqual(cache.hits, 0)
        self.assertEqual(cache.misses, 1)
class TTLCacheManagerTest(unittest.TestCase):
    def test_get(self):
        # The same reference must always yield the same cache instance.
        ref = 'testcache'
        cache0 = CacheManager.get(ref)
        cache1 = CacheManager.get(ref)
        self.assertEqual(id(cache0), id(cache1))
| 1,416 | 27.918367 | 54 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_ceph_service.py
|
# -*- coding: utf-8 -*-
# pylint: disable=dangerous-default-value,too-many-public-methods
import logging
import unittest
from contextlib import contextmanager
from unittest import mock
import pytest
from ..services.ceph_service import CephService
class CephServiceTest(unittest.TestCase):
    """Pool-lookup tests for CephService against a mocked pool list."""
    # Fixture pool list; the second pool carries an optional 'flaky'
    # attribute that the first one lacks.
    pools = [{
        'pool_name': 'good_pool',
        'pool': 1,
    }, {
        'pool_name': 'bad_pool',
        'pool': 2,
        'flaky': 'option_x'
    }]
    def setUp(self):
        # Mock get_pool_list
        self.list_patch = mock.patch('dashboard.services.ceph_service.CephService.get_pool_list')
        self.list = self.list_patch.start()
        self.list.return_value = self.pools
        # Mock mgr.get
        self.mgr_patch = mock.patch('dashboard.mgr.get')
        self.mgr = self.mgr_patch.start()
        self.mgr.return_value = {
            'by_pool': {
                '1': {'active+clean': 16},
                '2': {'creating+incomplete': 16},
            }
        }
        self.service = CephService()
    def tearDown(self):
        self.list_patch.stop()
        self.mgr_patch.stop()
    def test_get_pool_by_attribute_with_match(self):
        self.assertEqual(self.service.get_pool_by_attribute('pool', 1), self.pools[0])
        self.assertEqual(self.service.get_pool_by_attribute('pool_name', 'bad_pool'), self.pools[1])
    # No match (or unknown attribute) yields None rather than raising.
    def test_get_pool_by_attribute_without_a_match(self):
        self.assertEqual(self.service.get_pool_by_attribute('pool', 3), None)
        self.assertEqual(self.service.get_pool_by_attribute('not_there', 'sth'), None)
    # Matching against an attribute only some pools define must skip the
    # pools that lack it.
    def test_get_pool_by_attribute_matching_a_not_always_set_attribute(self):
        self.assertEqual(self.service.get_pool_by_attribute('flaky', 'option_x'), self.pools[1])
    @mock.patch('dashboard.mgr.rados.pool_reverse_lookup', return_value='good_pool')
    def test_get_pool_name_from_id_with_match(self, _mock):
        self.assertEqual(self.service.get_pool_name_from_id(1), 'good_pool')
    @mock.patch('dashboard.mgr.rados.pool_reverse_lookup', return_value=None)
    def test_get_pool_name_from_id_without_match(self, _mock):
        self.assertEqual(self.service.get_pool_name_from_id(3), None)
    def test_get_pool_pg_status(self):
        self.assertEqual(self.service.get_pool_pg_status('good_pool'), {'active+clean': 16})
    # Unknown pools produce an empty PG-status dict.
    def test_get_pg_status_without_match(self):
        self.assertEqual(self.service.get_pool_pg_status('no-pool'), {})
@contextmanager
def mock_smart_data(data):
    """Patch CephService so SMART lookups are served from ``data``.

    ``data`` maps device id -> SMART payload; the per-host and per-daemon
    device lists are derived from its keys.
    """
    devices = [{'devid': devid} for devid in data]
    # Per-device side effect: return just that device's payload.
    def _get_smart_data(d):
        return {d['devid']: data[d['devid']]}
    with mock.patch.object(CephService, '_get_smart_data_by_device', side_effect=_get_smart_data), \
            mock.patch.object(CephService, 'get_devices_by_host', return_value=devices), \
            mock.patch.object(CephService, 'get_devices_by_daemon', return_value=devices):
        yield
@pytest.mark.parametrize(
    "by,args,log",
    [
        ('host', ('osd0',), 'from host osd0'),
        ('daemon', ('osd', '1'), 'with ID 1')
    ]
)
def test_get_smart_data(caplog, by, args, log):
    """get_smart_data_by_{host,daemon}: data path and empty/log path."""
    # pylint: disable=protected-access
    expected_data = {
        'aaa': {'device': {'name': '/dev/sda'}},
        'bbb': {'device': {'name': '/dev/sdb'}},
    }
    # With devices present, all per-device payloads are aggregated.
    with mock_smart_data(expected_data):
        smart_data = getattr(CephService, 'get_smart_data_by_{}'.format(by))(*args)
        getattr(CephService, 'get_devices_by_{}'.format(by)).assert_called_with(*args)
        CephService._get_smart_data_by_device.assert_called()
        assert smart_data == expected_data
    # With no devices, nothing is fetched and a debug message is logged.
    with caplog.at_level(logging.DEBUG):
        with mock_smart_data([]):
            smart_data = getattr(CephService, 'get_smart_data_by_{}'.format(by))(*args)
            getattr(CephService, 'get_devices_by_{}'.format(by)).assert_called_with(*args)
            CephService._get_smart_data_by_device.assert_not_called()
            assert smart_data == {}
            assert log in caplog.text
@mock.patch.object(CephService, 'send_command')
def test_get_smart_data_by_device(send_command):
    """Daemon-type routing in _get_smart_data_by_device.

    An 'up' OSD is queried via 'osd smart'; otherwise a mon (if any) is
    queried via 'device query-daemon-health-metrics'; with neither, the
    result is empty.
    """
    # pylint: disable=protected-access
    device_id = 'Hitachi_HUA72201_JPW9K0N20D22SE'
    osd_tree_payload = {'nodes':
                        [
                            {'name': 'osd.1', 'status': 'down'},
                            {'name': 'osd.2', 'status': 'up'},
                            {'name': 'osd.3', 'status': 'up'}
                        ]}
    health_metrics_payload = {device_id: {'ata_apm': {'enabled': False}}}
    side_effect = [osd_tree_payload, health_metrics_payload]
    # Daemons associated: 1 osd down, 2 osd up.
    send_command.side_effect = side_effect
    smart_data = CephService._get_smart_data_by_device(
        {'devid': device_id, 'daemons': ['osd.1', 'osd.2', 'osd.3']})
    assert smart_data == health_metrics_payload
    send_command.assert_has_calls([mock.call('mon', 'osd tree'),
                                   mock.call('osd', 'smart', '2', devid=device_id)])
    # Daemons associated: 1 osd down.
    send_command.reset_mock()
    send_command.side_effect = [osd_tree_payload]
    smart_data = CephService._get_smart_data_by_device({'devid': device_id, 'daemons': ['osd.1']})
    assert smart_data == {}
    send_command.assert_has_calls([mock.call('mon', 'osd tree')])
    # Daemons associated: 1 osd down, 1 mon.
    send_command.reset_mock()
    send_command.side_effect = side_effect
    smart_data = CephService._get_smart_data_by_device(
        {'devid': device_id, 'daemons': ['osd.1', 'mon.1']})
    assert smart_data == health_metrics_payload
    send_command.assert_has_calls([mock.call('mon', 'osd tree'),
                                   mock.call('mon', 'device query-daemon-health-metrics',
                                             who='mon.1')])
    # Daemons associated: 1 mon.
    send_command.reset_mock()
    send_command.side_effect = side_effect
    smart_data = CephService._get_smart_data_by_device({'devid': device_id, 'daemons': ['mon.1']})
    assert smart_data == health_metrics_payload
    send_command.assert_has_calls([mock.call('mon', 'osd tree'),
                                   mock.call('mon', 'device query-daemon-health-metrics',
                                             who='mon.1')])
    # Daemons associated: 1 other (non-osd, non-mon).
    send_command.reset_mock()
    send_command.side_effect = [osd_tree_payload]
    smart_data = CephService._get_smart_data_by_device({'devid': device_id, 'daemons': ['rgw.1']})
    assert smart_data == {}
    send_command.assert_has_calls([mock.call('mon', 'osd tree')])
    # Daemons associated: no daemons.
    send_command.reset_mock()
    smart_data = CephService._get_smart_data_by_device({'devid': device_id, 'daemons': []})
    assert smart_data == {}
    send_command.assert_has_calls([])
| 6,852 | 39.311765 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_ceph_users.py
|
import unittest.mock as mock
from jsonschema import validate
from ..controllers.ceph_users import CephUser, create_form
from ..tests import ControllerTestCase
# Canned 'auth dump'-style payload returned by the mocked
# CephService.send_command in CephUsersControllerTestCase.test_get_all.
auth_dump_mock = {"auth_dump": [
    {"entity": "client.admin",
     "key": "RANDOMFi7NwMARAA7RdGqdav+BEEFDEAD0x00g==",
     "caps": {"mds": "allow *",
              "mgr": "allow *",
              "mon": "allow *",
              "osd": "allow *"}},
    {"entity": "client.bootstrap-mds",
     "key": "2RANDOMi7NwMARAA7RdGqdav+BEEFDEAD0x00g==",
     "caps": {"mds": "allow *",
              "osd": "allow *"}}
]}
class CephUsersControllerTestCase(ControllerTestCase):
    """Tests for the /api/cluster/user CRUD endpoints."""
    @classmethod
    def setup_server(cls):
        # Register the CRUD controller under test.
        cls.setup_crud_controllers(CephUser)
    @mock.patch('dashboard.services.ceph_service.CephService.send_command')
    def test_get_all(self, send_command_mock):
        """Secret keys from 'auth dump' must be masked in the listing."""
        send_command_mock.return_value = auth_dump_mock
        expected = [
            {'entity': 'client.admin',
             'caps': {'mds': 'allow *',
                      'mgr': 'allow *',
                      'mon': 'allow *',
                      'osd': 'allow *'},
             'key': '***********'},
            {'entity': 'client.bootstrap-mds',
             'caps': {'mds': 'allow *',
                      'osd': 'allow *'},
             'key': '***********'},
        ]
        self._get('/api/cluster/user')
        self.assertStatus(200)
        self.assertJsonBody(expected)
    def test_create_form(self):
        """The generated control schema accepts a well-formed payload."""
        generated = create_form.to_dict()
        control_schema = generated['control_schema']
        ui_schema = generated['ui_schema']  # layout part, not validated here
        validate(instance={'user_entity': 'foo',
                           'capabilities': [{'entity': 'mgr', 'cap': 'allow *'}]},
                 schema=control_schema)
| 1,769 | 32.396226 | 90 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_cephfs.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
# Prefer the external ``mock`` package when available, falling back to the
# stdlib implementation.  Both ``Mock`` and ``patch`` are used below
# (``patch`` decorates CephFsTest), so both must be importable on either
# branch.  The previous code imported only ``Mock`` from the external
# package, leaving ``patch`` undefined whenever ``mock`` was installed.
try:
    from mock import Mock, patch
except ImportError:
    from unittest.mock import Mock, patch
from ..controllers.cephfs import CephFS
from ..tests import ControllerTestCase
class MetaDataMock(object):
    """Stand-in for a daemon metadata mapping; ``get`` always yields 'bar'."""
    def get(self, _key, _default):
        # Signature mirrors dict.get(); both arguments are deliberately
        # ignored and a fixed version string is returned.
        return 'bar'
def get_metadata_mock(key, meta_key):
    """Mimic mgr.get_metadata() for the tests below.

    For service 'mds': key 'foo' yields a MetaDataMock, None yields no
    metadata.  Any other service or key raises KeyError, just like a
    plain dict lookup.
    """
    known_mds = {
        None: None,  # unknown daemon -> no metadata
        'foo': MetaDataMock(),
    }
    return {'mds': known_mds[meta_key]}[key]
@patch('dashboard.mgr.get_metadata', Mock(side_effect=get_metadata_mock))
class CephFsTest(ControllerTestCase):
    """Unit tests for CephFS._append_mds_metadata with mocked mgr metadata."""
    # Single shared controller instance; the tests only call its helper.
    cephFs = CephFS()
    def test_append_of_mds_metadata_if_key_is_not_found(self):
        """An unknown MDS yields no metadata, so no entry may be added."""
        mds_versions = defaultdict(list)
        # pylint: disable=protected-access
        self.cephFs._append_mds_metadata(mds_versions, None)
        self.assertEqual(len(mds_versions), 0)
    def test_append_of_mds_metadata_with_existing_metadata(self):
        """A known MDS ('foo') is grouped under its version ('bar' from
        MetaDataMock.get)."""
        mds_versions = defaultdict(list)
        # pylint: disable=protected-access
        self.cephFs._append_mds_metadata(mds_versions, 'foo')
        self.assertEqual(len(mds_versions), 1)
        self.assertEqual(mds_versions['bar'], ['foo'])
| 1,193 | 26.767442 | 73 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_cluster_upgrade.py
|
from ..controllers.cluster import ClusterUpgrade
from ..tests import ControllerTestCase, patch_orch
from ..tools import NotificationQueue, TaskManager
class ClusterUpgradeControllerTest(ControllerTestCase):
    """Tests for the /api/cluster/upgrade endpoints."""
    URL_CLUSTER_UPGRADE = '/api/cluster/upgrade'
    @classmethod
    def setup_server(cls):
        # Task-based endpoints require the queue and task manager to run.
        NotificationQueue.start_queue()
        TaskManager.init()
        cls.setup_controllers([ClusterUpgrade])
    @classmethod
    def tearDownClass(cls):
        NotificationQueue.stop()
    def test_upgrade_list(self):
        """Listing available versions proxies the orchestrator result."""
        versions = ['17.1.0', '16.2.7', '16.2.6', '16.2.5', '16.1.4', '16.1.3']
        with patch_orch(True) as orch:
            orch.upgrades.list.return_value = versions
            self._get(f'{self.URL_CLUSTER_UPGRADE}'
                      '?image=quay.io/ceph/ceph:v16.1.0&tags=False&show_all_versions=False')
            self.assertStatus(200)
            self.assertJsonBody(versions)
    def test_start_upgrade(self):
        """POST /start forwards the requested version to the orchestrator."""
        expected = "Initiating upgrade to 17.2.6"
        with patch_orch(True) as orch:
            orch.upgrades.start.return_value = expected
            self._post(f'{self.URL_CLUSTER_UPGRADE}/start', {'version': '17.2.6'})
            self.assertStatus(200)
            self.assertJsonBody(expected)
    def test_pause_upgrade(self):
        """PUT /pause relays the orchestrator's confirmation message."""
        expected = "Paused upgrade to 17.2.6"
        with patch_orch(True) as orch:
            orch.upgrades.pause.return_value = expected
            self._put(f'{self.URL_CLUSTER_UPGRADE}/pause')
            self.assertStatus(200)
            self.assertJsonBody(expected)
    def test_resume_upgrade(self):
        """PUT /resume relays the orchestrator's confirmation message."""
        expected = "Resumed upgrade to 17.2.6"
        with patch_orch(True) as orch:
            orch.upgrades.resume.return_value = expected
            self._put(f'{self.URL_CLUSTER_UPGRADE}/resume')
            self.assertStatus(200)
            self.assertJsonBody(expected)
    def test_stop_upgrade(self):
        """PUT /stop relays the orchestrator's confirmation message."""
        expected = "Stopped upgrade to 17.2.6"
        with patch_orch(True) as orch:
            orch.upgrades.stop.return_value = expected
            self._put(f'{self.URL_CLUSTER_UPGRADE}/stop')
            self.assertStatus(200)
            self.assertJsonBody(expected)
| 2,307 | 36.225806 | 93 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_controllers.py
|
# -*- coding: utf-8 -*-
from ..controllers import APIRouter, BaseController, Endpoint, RESTController, Router
from ..tests import ControllerTestCase
# Test controller exercising @Endpoint path/query parameter handling,
# mounted under /ui/btest/{key}.  The Endpoint framework derives URL
# segments and query parameters from these signatures, so the signatures
# themselves are the fixture.
@Router("/btest/{key}", base_url="/ui", secure=False)
class BTest(BaseController):
    # Path parameter plus an optional query parameter.
    @Endpoint()
    def test1(self, key, opt=1):
        return {'key': key, 'opt': opt}
    # A second required argument becomes an additional path segment.
    @Endpoint()
    def test2(self, key, skey, opt=1):
        return {'key': key, 'skey': skey, 'opt': opt}
    # Explicit path template overriding the default method-name path.
    @Endpoint(path="/foo/{skey}/test-3")
    def test3(self, key, skey, opt=1):
        return {'key': key, 'skey': skey, 'opt': opt}
    # POST on the same custom path; 'opt' is forced to be a query param,
    # 'data' comes from the JSON body.
    @Endpoint('POST', path="/foo/{skey}/test-3", query_params=['opt'])
    def test4(self, key, skey, data, opt=1):
        return {'key': key, 'skey': skey, 'data': data, 'opt': opt}
    # PUT with explicit path/query parameter classification.
    @Endpoint('PUT', path_params=['skey'], query_params=['opt'])
    def test5(self, key, skey, data1, data2=None, opt=1):
        return {'key': key, 'skey': skey, 'data1': data1, 'data2': data2,
                'opt': opt}
    # Non-JSON endpoint: returns a plain string body.
    @Endpoint('GET', json_response=False)
    def test6(self, key, opt=1):
        return "My Formatted string key={} opt={}".format(key, opt)
    # __call__ is reachable at the controller root (/ui/btest/{key}).
    @Endpoint()
    def __call__(self, key, opt=1):
        return {'key': key, 'opt': opt}
# Test REST controller.  RESOURCE_ID declares a two-segment resource key
# (skey/ekey), so resource URLs look like /rtest/{key}/{skey}/{ekey}.
@APIRouter("/rtest/{key}", secure=False)
class RTest(RESTController):
    RESOURCE_ID = 'skey/ekey'
    def list(self, key, opt=1):
        # GET /rtest/{key}
        return {'key': key, 'opt': opt}
    def create(self, key, data1, data2=None):
        # POST /rtest/{key} (responds 201)
        return {'key': key, 'data1': data1, 'data2': data2}
    def get(self, key, skey, ekey, opt=1):
        # GET /rtest/{key}/{skey}/{ekey}
        return {'key': key, 'skey': skey, 'ekey': ekey, 'opt': opt}
    def set(self, key, skey, ekey, data):
        # PUT /rtest/{key}/{skey}/{ekey}
        return {'key': key, 'skey': skey, 'ekey': ekey, 'data': data}
    def delete(self, key, skey, ekey, opt=1):
        # DELETE /rtest/{key}/{skey}/{ekey}; no body (204)
        pass
    def bulk_set(self, key, data1, data2=None):
        # PUT /rtest/{key}
        return {'key': key, 'data1': data1, 'data2': data2}
    def bulk_delete(self, key, opt=1):
        # DELETE /rtest/{key}; no body (204)
        pass
    @RESTController.Collection('POST')
    def cmethod(self, key, data):
        # POST /rtest/{key}/cmethod
        return {'key': key, 'data': data}
    @RESTController.Resource('GET')
    def rmethod(self, key, skey, ekey, opt=1):
        # GET /rtest/{key}/{skey}/{ekey}/rmethod
        return {'key': key, 'skey': skey, 'ekey': ekey, 'opt': opt}
# Minimal root controller returning a raw HTML string at '/'.
@Router("/", secure=False)
class Root(BaseController):
    @Endpoint(json_response=False)
    def __call__(self):
        return "<html></html>"
class ControllersTest(ControllerTestCase):
    """Drive BTest/RTest over HTTP and verify routing and parameters.

    Path and query parameters always arrive as strings; JSON body values
    keep their original types.
    """
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([BTest, RTest], "/test")
    def test_1(self):
        self._get('/test/ui/btest/100/test1?opt=3')
        self.assertStatus(200)
        self.assertJsonBody({'key': '100', 'opt': '3'})
    def test_2(self):
        self._get('/test/ui/btest/100/test2/200?opt=3')
        self.assertStatus(200)
        self.assertJsonBody({'key': '100', 'skey': '200', 'opt': '3'})
    def test_3(self):
        self._get('/test/ui/btest/100/foo/200/test-3?opt=3')
        self.assertStatus(200)
        self.assertJsonBody({'key': '100', 'skey': '200', 'opt': '3'})
    def test_4(self):
        self._post('/test/ui/btest/100/foo/200/test-3?opt=3', {'data': 30})
        self.assertStatus(200)
        self.assertJsonBody({'key': '100', 'skey': '200', 'data': 30,
                             'opt': '3'})
    def test_5(self):
        self._put('/test/ui/btest/100/test5/200?opt=3',
                  {'data1': 40, 'data2': "hello"})
        self.assertStatus(200)
        self.assertJsonBody({'key': '100', 'skey': '200', 'data1': 40,
                             'data2': "hello", 'opt': '3'})
    def test_6(self):
        # Non-JSON endpoint: raw string body.
        self._get('/test/ui/btest/100/test6')
        self.assertStatus(200)
        self.assertBody("My Formatted string key=100 opt=1")
    def test_7(self):
        # __call__ is reachable at the controller root.
        self._get('/test/ui/btest/100?opt=3')
        self.assertStatus(200)
        self.assertJsonBody({'key': '100', 'opt': '3'})
    def test_rest_list(self):
        self._get('/test/api/rtest/300?opt=2')
        self.assertStatus(200)
        self.assertJsonBody({'key': '300', 'opt': '2'})
    def test_rest_create(self):
        self._post('/test/api/rtest/300', {'data1': 20, 'data2': True})
        self.assertStatus(201)
        self.assertJsonBody({'key': '300', 'data1': 20, 'data2': True})
    def test_rest_get(self):
        self._get('/test/api/rtest/300/1/2?opt=3')
        self.assertStatus(200)
        self.assertJsonBody({'key': '300', 'skey': '1', 'ekey': '2',
                             'opt': '3'})
    def test_rest_set(self):
        self._put('/test/api/rtest/300/1/2', {'data': 40})
        self.assertStatus(200)
        self.assertJsonBody({'key': '300', 'skey': '1', 'ekey': '2',
                             'data': 40})
    def test_rest_delete(self):
        self._delete('/test/api/rtest/300/1/2?opt=3')
        self.assertStatus(204)
    def test_rest_bulk_set(self):
        self._put('/test/api/rtest/300', {'data1': 20, 'data2': True})
        self.assertStatus(200)
        self.assertJsonBody({'key': '300', 'data1': 20, 'data2': True})
        self._put('/test/api/rtest/400',
                  {'data1': 20, 'data2': ['one', 'two', 'three']})
        self.assertStatus(200)
        self.assertJsonBody({
            'key': '400',
            'data1': 20,
            'data2': ['one', 'two', 'three'],
        })
    def test_rest_bulk_delete(self):
        self._delete('/test/api/rtest/300?opt=2')
        self.assertStatus(204)
    def test_rest_collection(self):
        self._post('/test/api/rtest/300/cmethod', {'data': 30})
        self.assertStatus(200)
        self.assertJsonBody({'key': '300', 'data': 30})
    def test_rest_resource(self):
        self._get('/test/api/rtest/300/2/3/rmethod?opt=4')
        self.assertStatus(200)
        self.assertJsonBody({'key': '300', 'skey': '2', 'ekey': '3',
                             'opt': '4'})
class RootControllerTest(ControllerTestCase):
    """The root controller serves the bare HTML shell at '/'."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([Root])
    def test_index(self):
        expected_body = "<html></html>"
        self._get("/")
        self.assertBody(expected_body)
| 6,390 | 32.460733 | 85 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_crud.py
|
# pylint: disable=C0102
import json
from typing import NamedTuple
import pytest
from jsonschema import validate
from ..controllers._crud import ArrayHorizontalContainer, \
ArrayVerticalContainer, Form, FormField, HorizontalContainer, SecretStr, \
VerticalContainer, serialize
def assertObjectEquals(a, b):
    """Assert that two objects serialize to the same JSON text."""
    left = json.dumps(a)
    right = json.dumps(b)
    assert left == right
class NamedTupleMock(NamedTuple):
    # Simple two-field tuple used to exercise serialize() on NamedTuples.
    foo: int
    var: str
class NamedTupleSecretMock(NamedTuple):
    # Like NamedTupleMock, but with a SecretStr field that serialize()
    # is expected to mask.
    foo: int
    var: str
    key: SecretStr
@pytest.mark.parametrize("inp,out", [
    (["foo", "var"], ["foo", "var"]),  # lists pass through unchanged
    (NamedTupleMock(1, "test"), {"foo": 1, "var": "test"}),  # NamedTuple -> dict
    (NamedTupleSecretMock(1, "test", "imaginethisisakey"), {"foo": 1, "var": "test",
                                                            "key": "***********"}),
    ((1, 2, 3), [1, 2, 3]),  # tuples become lists
    (set((1, 2, 3)), [1, 2, 3]),  # sets become lists
])
def test_serialize(inp, out):
    # serialize() must mask SecretStr values and coerce containers into
    # JSON-friendly types.
    assertObjectEquals(serialize(inp), out)
def test_schema():
    # Build a Form containing every container flavor and validate a
    # well-formed payload against the generated JSON control schema.
    form = Form(path='/cluster/user/create',
                root_container=VerticalContainer('Create user', key='create_user', fields=[
                    FormField('User entity', key='user_entity', field_type=str),
                    ArrayHorizontalContainer('Capabilities', key='caps', fields=[
                        FormField('left', field_type=str, key='left'),
                        FormField('right', key='right', field_type=str)
                    ]),
                    ArrayVerticalContainer('ah', key='ah', fields=[
                        FormField('top', key='top', field_type=str),
                        FormField('bottom', key='bottom', field_type=str)
                    ]),
                    HorizontalContainer('oh', key='oh', fields=[
                        FormField('left', key='left', field_type=str),
                        FormField('right', key='right', field_type=str)
                    ]),
                    VerticalContainer('ov', key='ov', fields=[
                        FormField('top', key='top', field_type=str),
                        FormField('bottom', key='bottom', field_type=bool)
                    ]),
                ]))
    form_dict = form.to_dict()
    # 'layout' (ui_schema) is produced alongside the control schema but
    # only the control schema is validated here.
    schema = {'schema': form_dict['control_schema'], 'layout': form_dict['ui_schema']}
    validate(instance={'user_entity': 'foo',
                       'caps': [{'left': 'foo', 'right': 'foo2'}],
                       'ah': [{'top': 'foo', 'bottom': 'foo2'}],
                       'oh': {'left': 'foo', 'right': 'foo2'},
                       'ov': {'top': 'foo', 'bottom': True}}, schema=schema['schema'])
| 2,606 | 36.782609 | 91 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_daemon.py
|
# -*- coding: utf-8 -*-
from ..controllers._version import APIVersion
from ..controllers.daemon import Daemon
from ..tests import ControllerTestCase, patch_orch
class DaemonTest(ControllerTestCase):
    """Tests for the /api/daemon endpoint."""
    URL_DAEMON = '/api/daemon'
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([Daemon])
    def test_daemon_action(self):
        """A valid action is forwarded to the orchestrator."""
        expected = "Scheduled to stop crash.b78cd1164a1b on host 'hostname'"
        with patch_orch(True) as orch:
            orch.daemons.action.return_value = expected
            body = {
                'action': 'restart',
                'container_image': None
            }
            self._put(f'{self.URL_DAEMON}/crash.b78cd1164a1b', body,
                      version=APIVersion(0, 1))
            self.assertJsonBody(expected)
            self.assertStatus(200)
    def test_daemon_invalid_action(self):
        """An unknown action yields a 400 with a structured error body."""
        body = {
            'action': 'invalid',
            'container_image': None
        }
        with patch_orch(True):
            self._put(f'{self.URL_DAEMON}/crash.b78cd1164a1b', body,
                      version=APIVersion(0, 1))
            self.assertJsonBody({
                'detail': 'Daemon action "invalid" is either not valid or not supported.',
                'code': 'invalid_daemon_action',
                'component': None
            })
            self.assertStatus(400)
| 1,345 | 31.047619 | 97 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_docs.py
|
# # -*- coding: utf-8 -*-
import unittest
from ..api.doc import SchemaType
from ..controllers import ENDPOINT_MAP, APIDoc, APIRouter, Endpoint, EndpointDoc, RESTController
from ..controllers._version import APIVersion
from ..controllers.docs import Docs
from ..tests import ControllerTestCase
# Dummy controller and endpoint that can be assigned with @EndpointDoc and @GroupDoc
@APIDoc("Group description", group="FooGroup")
@APIRouter("/doctest/", secure=False)
class DecoratedController(RESTController):
    RESOURCE_ID = 'doctest'
    # Fully documented resource endpoint, pinned to API version 0.1.
    # The 200 response is declared as a list of objects, the 202 response
    # as a single object; DocsTest checks both renderings.
    @EndpointDoc(
        description="Endpoint description",
        group="BarGroup",
        parameters={
            'parameter': (int, "Description of parameter"),
        },
        responses={
            200: [{
                'my_prop': (str, '200 property desc.')
            }],
            202: {
                'my_prop': (str, '202 property desc.')
            },
        },
    )
    @Endpoint(json_response=False)
    @RESTController.Resource('PUT', version=APIVersion(0, 1))
    def decorated_func(self, parameter):
        pass
    # Undocumented list endpoint, also versioned 0.1, used to check the
    # generic fallback response schema.
    @RESTController.MethodMap(version=APIVersion(0, 1))
    def list(self):
        pass
# To assure functionality of @EndpointDoc, @GroupDoc
class DocDecoratorsTest(ControllerTestCase):
    """Verify that @APIDoc/@EndpointDoc attach their metadata attributes."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([DecoratedController, Docs], "/test")
    def test_group_info_attr(self):
        """@APIDoc stores the group tag and description on the class."""
        controller = DecoratedController()
        self.assertTrue(hasattr(controller, 'doc_info'))
        for field in ('tag_descr', 'tag'):
            self.assertIn(field, controller.doc_info)
    def test_endpoint_info_attr(self):
        """@EndpointDoc stores summary/tag/parameters/response on the
        endpoint function."""
        endpoint = DecoratedController().decorated_func
        self.assertTrue(hasattr(endpoint, 'doc_info'))
        for field in ('summary', 'tag', 'parameters', 'response'):
            self.assertIn(field, endpoint.doc_info)
# To assure functionality of Docs.py
# pylint: disable=protected-access
class DocsTest(ControllerTestCase):
    """Tests for the OpenAPI spec generation in Docs."""
    @classmethod
    def setup_server(cls):
        # Start from an empty endpoint registry so only the controllers
        # registered below appear in the generated paths.
        ENDPOINT_MAP.clear()
        cls.setup_controllers([DecoratedController, Docs], "/test")
    def test_type_to_str(self):
        """Python types map onto their OpenAPI schema type names."""
        self.assertEqual(Docs()._type_to_str(str), str(SchemaType.STRING))
        self.assertEqual(Docs()._type_to_str(int), str(SchemaType.INTEGER))
        self.assertEqual(Docs()._type_to_str(bool), str(SchemaType.BOOLEAN))
        self.assertEqual(Docs()._type_to_str(list), str(SchemaType.ARRAY))
        self.assertEqual(Docs()._type_to_str(tuple), str(SchemaType.ARRAY))
        self.assertEqual(Docs()._type_to_str(float), str(SchemaType.NUMBER))
        self.assertEqual(Docs()._type_to_str(object), str(SchemaType.OBJECT))
        self.assertEqual(Docs()._type_to_str(None), str(SchemaType.OBJECT))
    def test_gen_paths(self):
        """The documented PUT endpoint renders tags, parameters and
        versioned response schemas (list for 200, object for 202)."""
        outcome = Docs().gen_paths(False)['/api/doctest//{doctest}/decorated_func']['put']
        self.assertIn('tags', outcome)
        self.assertIn('summary', outcome)
        self.assertIn('parameters', outcome)
        self.assertIn('responses', outcome)
        expected_response_content = {
            '200': {
                APIVersion(0, 1).to_mime_type(): {
                    'schema': {'type': 'array',
                               'items': {'type': 'object', 'properties': {
                                   'my_prop': {
                                       'type': 'string',
                                       'description': '200 property desc.'}}},
                               'required': ['my_prop']}}},
            '202': {
                APIVersion(0, 1).to_mime_type(): {
                    'schema': {'type': 'object',
                               'properties': {'my_prop': {
                                   'type': 'string',
                                   'description': '202 property desc.'}},
                               'required': ['my_prop']}}
            }
        }
        # Check that a schema of type 'array' is received in the response.
        self.assertEqual(expected_response_content['200'], outcome['responses']['200']['content'])
        # Check that a schema of type 'object' is received in the response.
        self.assertEqual(expected_response_content['202'], outcome['responses']['202']['content'])
    def test_gen_method_paths(self):
        """Undocumented endpoints still get a generic object response."""
        outcome = Docs().gen_paths(False)['/api/doctest/']['get']
        self.assertEqual({APIVersion(0, 1).to_mime_type(): {'type': 'object'}},
                         outcome['responses']['200']['content'])
    def test_gen_paths_all(self):
        """Every generated path lives under /api or /ui-api."""
        paths = Docs().gen_paths(False)
        for key in paths:
            self.assertTrue(any(base in key.split('/')[1] for base in ['api', 'ui-api']))
    def test_gen_tags(self):
        """@APIDoc group information becomes an OpenAPI tag."""
        outcome = Docs._gen_tags(False)
        self.assertEqual([{'description': 'Group description', 'name': 'FooGroup'}], outcome)
class TestEndpointDocWrapper(unittest.TestCase):
    """Direct unit tests for the EndpointDoc helper methods."""
    def test_wrong_param_types(self):
        """Invalid argument types must raise at decoration time."""
        with self.assertRaises(Exception):
            EndpointDoc(description=False)
        with self.assertRaises(Exception):
            EndpointDoc(group=False)
        with self.assertRaises(Exception):
            EndpointDoc(parameters='wrong parameters')
        with self.assertRaises(Exception):
            EndpointDoc(responses='wrong response')
        def dummy_func():
            pass
        with self.assertRaises(Exception):
            EndpointDoc(parameters={'parameter': 'wrong parameter'})(dummy_func)
    def test_split_dict(self):
        """_split_dict flattens plain, dict-valued and list-valued
        parameter descriptions into parameter records."""
        edoc = EndpointDoc()
        data = {
            'name1': (int, 'description1'),
            'dict_param': ({'name2': (int, 'description2')}, 'description_dict'),
            'list_param': ([int, float], 'description_list')
        }
        expected = [
            {
                'name': 'name1',
                'description': 'description1',
                'required': True,
                'nested': False,
                'type': int
            },
            {
                'name': 'dict_param',
                'description': 'description_dict',
                'required': True,
                'nested': False,
                'type': dict,
                'nested_params': [
                    {
                        'name': 'name2',
                        'description': 'description2',
                        'required': True,
                        'nested': True,
                        'type': int
                    }
                ]
            },
            {
                'name': 'list_param',
                'description':
                    'description_list',
                'required': True,
                'nested': False,
                'type': [int, float]
            }
        ]
        res = edoc._split_dict(data, False)
        self.assertEqual(res, expected)
    def test_split_param(self):
        """A scalar parameter keeps its type and records its default."""
        edoc = EndpointDoc()
        name = 'foo'
        p_type = int
        description = 'description'
        default_value = 1
        expected = {
            'name': name,
            'description': description,
            'required': True,
            'nested': False,
            'default': default_value,
            'type': p_type,
        }
        res = edoc._split_param(name, p_type, description, default_value=default_value)
        self.assertEqual(res, expected)
    def test_split_param_nested(self):
        """A (dict, description) tuple is expanded into nested params."""
        edoc = EndpointDoc()
        name = 'foo'
        p_type = {'name2': (int, 'description2')}, 'description_dict'
        description = 'description'
        default_value = 1
        expected = {
            'name': name,
            'description': description,
            'required': True,
            'nested': True,
            'default': default_value,
            'type': type(p_type),
            'nested_params': [
                {
                    'name': 'name2',
                    'description': 'description2',
                    'required': True,
                    'nested': True,
                    'type': int
                }
            ]
        }
        res = edoc._split_param(name, p_type, description, default_value=default_value,
                                nested=True)
        self.assertEqual(res, expected)
    def test_split_list(self):
        """Splitting a plain list of (name, type) tuples yields nothing."""
        edoc = EndpointDoc()
        data = [('foo', int), ('foo', float)]
        expected = []
        res = edoc._split_list(data, True)
        self.assertEqual(res, expected)
| 8,653 | 34.908714 | 98 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_erasure_code_profile.py
|
# -*- coding: utf-8 -*-
from .. import mgr
from ..controllers.erasure_code_profile import ErasureCodeProfile
from ..tests import ControllerTestCase
class ErasureCodeProfileTest(ControllerTestCase):
    """Tests for the /api/erasure_code_profile endpoint."""
    @classmethod
    def setup_server(cls):
        # Fake out mgr.get() for the keys the controller reads.
        mgr_data = {
            'osd_map': {
                'erasure_code_profiles': {
                    'test': {
                        'k': '2',
                        'm': '1'
                    }
                }
            },
            'health': {'json': '{"status": 1}'},
            'fs_map': {'filesystems': []},
        }
        mgr.get.side_effect = lambda key: mgr_data[key]
        cls.setup_controllers([ErasureCodeProfile])
    def test_list(self):
        """k/m come back as ints together with the profile name."""
        self._get('/api/erasure_code_profile')
        self.assertStatus(200)
        self.assertJsonBody([{'k': 2, 'm': 1, 'name': 'test'}])
| 837 | 26.933333 | 65 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_exceptions.py
|
# -*- coding: utf-8 -*-
import time
import rados
from ..controllers import Endpoint, RESTController, Router, Task
from ..services.ceph_service import SendCommandError
from ..services.exception import handle_rados_error, \
handle_send_command_error, serialize_dashboard_exception
from ..tests import ControllerTestCase
from ..tools import NotificationQueue, TaskManager, ViewCache
# pylint: disable=W0613
@Router('foo', secure=False)
class FooResource(RESTController):
    """Controller raising various errors to exercise the exception
    serialization decorators; the decorator stacking order here is part
    of what RESTControllerTest verifies."""
    @Endpoint()
    @handle_rados_error('foo')
    def no_exception(self, param1, param2):
        # Happy path: no error raised.
        return [param1, param2]
    @Endpoint()
    @handle_rados_error('foo')
    def error_foo_controller(self):
        # rados.OSError is serialized to a 400 with component 'foo'.
        raise rados.OSError('hi', errno=-42)
    @Endpoint()
    @handle_send_command_error('foo')
    def error_send_command(self):
        raise SendCommandError('hi', 'prefix', {}, -42)
    @Endpoint()
    def error_generic(self):
        # Plain rados.Error without a handler decorator.
        raise rados.Error('hi')
    @Endpoint()
    def vc_no_data(self):
        @ViewCache(timeout=0)
        def _no_data():
            time.sleep(0.2)
        # NOTE(review): the corresponding test expects a 200/VALUE_NONE
        # response, so the ViewCache call presumably short-circuits before
        # the assert is reached -- confirm against the ViewCache
        # implementation.
        _no_data()
        assert False
    @handle_rados_error('foo')
    @Endpoint()
    def vc_exception(self):
        @ViewCache(timeout=10)
        def _raise():
            raise rados.OSError('hi', errno=-42)
        _raise()
        assert False
    @Endpoint()
    def internal_server_error(self):
        # Deliberate ZeroDivisionError -> generic HTTP 500.
        return 1/0
    @handle_send_command_error('foo')
    def list(self):
        # GET /foo/ (RESTController list) always fails.
        raise SendCommandError('list', 'prefix', {}, -42)
    @Endpoint()
    @Task('task_exceptions/task_exception', {1: 2}, 1.0,
          exception_handler=serialize_dashboard_exception)
    @handle_rados_error('foo')
    def task_exception(self):
        # Failure inside a Task: the error payload gains task metadata.
        raise rados.OSError('hi', errno=-42)
    @Endpoint()
    def wait_task_exception(self):
        # Truthy while the exception task is still listed; the test polls
        # this endpoint until it turns falsy.
        ex, _ = TaskManager.list('task_exceptions/task_exception')
        return bool(len(ex))
# pylint: disable=C0102
class Root(object):
    # Mount FooResource under /foo of this root controller.
    foo = FooResource()
class RESTControllerTest(ControllerTestCase):
    """End-to-end checks for the exception-handling decorators."""
    @classmethod
    def setup_server(cls):
        NotificationQueue.start_queue()
        TaskManager.init()
        cls.setup_controllers([FooResource])
    @classmethod
    def tearDownClass(cls):
        NotificationQueue.stop()
    def _assert_foo_error(self, detail):
        # Handled rados/send-command errors all serialize to this shape.
        self.assertStatus(400)
        self.assertJsonBody(
            {'detail': detail, 'code': "42", 'component': 'foo'}
        )
    def test_no_exception(self):
        self._get('/foo/no_exception/a/b')
        self.assertStatus(200)
        self.assertJsonBody(['a', 'b'])
    def test_error_foo_controller(self):
        self._get('/foo/error_foo_controller')
        self._assert_foo_error('[errno -42] hi')
    def test_error_send_command(self):
        self._get('/foo/error_send_command')
        self._assert_foo_error('[errno -42] hi')
    def test_error_send_command_list(self):
        self._get('/foo/')
        self._assert_foo_error('[errno -42] list')
    def test_error_foo_generic(self):
        self._get('/foo/error_generic')
        self.assertJsonBody({'detail': 'hi', 'code': 'Error', 'component': None})
        self.assertStatus(400)
    def test_viewcache_no_data(self):
        self._get('/foo/vc_no_data')
        self.assertStatus(200)
        self.assertJsonBody({'status': ViewCache.VALUE_NONE, 'value': None})
    def test_viewcache_exception(self):
        self._get('/foo/vc_exception')
        self._assert_foo_error('[errno -42] hi')
    def test_task_exception(self):
        self._get('/foo/task_exception')
        self.assertStatus(400)
        self.assertJsonBody(
            {'detail': '[errno -42] hi', 'code': "42", 'component': 'foo',
             'task': {'name': 'task_exceptions/task_exception', 'metadata': {'1': 2}}}
        )
        # Poll until the failed task has disappeared from the executing list.
        self._get('/foo/wait_task_exception')
        while self.json_body():
            time.sleep(0.5)
            self._get('/foo/wait_task_exception')
    def test_internal_server_error(self):
        self._get('/foo/internal_server_error')
        self.assertStatus(500)
        self.assertIn('unexpected condition', self.json_body()['detail'])
    def test_404(self):
        self._get('/foonot_found')
        self.assertStatus(404)
        self.assertIn('detail', self.json_body())
| 4,523 | 27.099379 | 86 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_feature_toggles.py
|
# -*- coding: utf-8 -*-
import unittest
try:
from mock import Mock, patch
except ImportError:
from unittest.mock import Mock, patch
from ..plugins.feature_toggles import Actions, Features, FeatureToggles
from ..tests import KVStoreMockMixin
class SettingsTest(unittest.TestCase, KVStoreMockMixin):
    """Tests for the FeatureToggles plugin's request filtering."""
    @classmethod
    def setUpClass(cls):
        # The KV store mock must be in place before the plugin reads its
        # options; imports below are deferred for the same reason.
        cls.mock_kv_store()
        cls.CONFIG_KEY_DICT['url_prefix'] = ''
        # Mock MODULE_OPTIONS
        from .. import mgr
        cls.mgr = mgr
        # Populate real endpoint map
        from ..controllers import BaseController
        cls.controllers = BaseController.load_controllers()
        # Initialize FeatureToggles plugin
        cls.plugin = FeatureToggles()
        cls.CONFIG_KEY_DICT.update(
            {k['name']: k['default'] for k in cls.plugin.get_options()})
        cls.plugin.setup()
    def test_filter_request_when_all_features_enabled(self):
        """
        This test iterates over all the registered endpoints to ensure that, with default
        feature toggles, none is disabled.
        """
        import cherrypy
        request = Mock()
        for controller in self.controllers:
            request.path_info = controller.get_path()
            try:
                self.plugin.filter_request_before_handler(request)
            except cherrypy.HTTPError:
                self.fail("Request filtered {} and it shouldn't".format(
                    request.path_info))
    def test_filter_request_when_some_feature_enabled(self):
        """
        This test focuses on a single feature and checks whether it's actually
        disabled
        """
        import cherrypy
        # Disable CephFS via the plugin's own command handler, then any
        # request resolving to that feature must be rejected.
        self.plugin.register_commands()['handle_command'](
            self.mgr, Actions.DISABLE, [Features.CEPHFS])
        with patch.object(self.plugin, '_get_feature_from_request',
                          return_value=Features.CEPHFS):
            with self.assertRaises(cherrypy.HTTPError):
                request = Mock()
                self.plugin.filter_request_before_handler(request)
| 2,082 | 31.046154 | 89 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_grafana.py
|
import json
import unittest
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from requests import RequestException
from ..controllers.grafana import Grafana
from ..grafana import GrafanaRestClient
from ..settings import Settings
from ..tests import ControllerTestCase, KVStoreMockMixin
class GrafanaTest(ControllerTestCase, KVStoreMockMixin):
    """Tests for the /api/grafana endpoints."""
    @classmethod
    def setup_server(cls):
        cls.setup_controllers([Grafana])
    def setUp(self):
        # Fresh KV store per test so Settings mutations don't leak.
        self.mock_kv_store()
    @staticmethod
    def server_settings(
            url='http://localhost:3000',
            user='admin',
            password='admin',
    ):
        # Apply Grafana connection settings; pass '' to blank out a value,
        # None to leave the current value untouched.
        if url is not None:
            Settings.GRAFANA_API_URL = url
        if user is not None:
            Settings.GRAFANA_API_USERNAME = user
        if password is not None:
            Settings.GRAFANA_API_PASSWORD = password
    def test_url(self):
        """The configured Grafana URL is reported back."""
        self.server_settings()
        self._get('/api/grafana/url')
        self.assertStatus(200)
        self.assertJsonBody({'instance': 'http://localhost:3000'})
    @patch('dashboard.controllers.grafana.GrafanaRestClient.url_validation')
    def test_validation_endpoint_returns(self, url_validation):
        """
        The point of this test is to see that `validation` is an active endpoint that returns a 200
        status code.
        """
        url_validation.return_value = b'404'
        self.server_settings()
        self._get('/api/grafana/validation/foo')
        self.assertStatus(200)
        self.assertBody(b'"404"')
    @patch('dashboard.controllers.grafana.GrafanaRestClient.url_validation')
    def test_validation_endpoint_fails(self, url_validation):
        """A RequestException from the client maps to a 400 error body."""
        url_validation.side_effect = RequestException
        self.server_settings()
        self._get('/api/grafana/validation/bar')
        self.assertStatus(400)
        self.assertJsonBody({'detail': '', 'code': 'Error', 'component': 'grafana'})
    def test_dashboards_unavailable_no_url(self):
        """Pushing dashboards without a configured URL fails with 500."""
        self.server_settings(url="")
        self._post('/api/grafana/dashboards')
        self.assertStatus(500)
    @patch('dashboard.controllers.grafana.GrafanaRestClient.push_dashboard')
    def test_dashboards_unavailable_no_user(self, pd):
        """Pushing dashboards without a username fails with 500."""
        pd.side_effect = RequestException
        self.server_settings(user="")
        self._post('/api/grafana/dashboards')
        self.assertStatus(500)
    def test_dashboards_unavailable_no_password(self):
        """Pushing dashboards without a password fails with 500."""
        self.server_settings(password="")
        self._post('/api/grafana/dashboards')
        self.assertStatus(500)
class GrafanaRestClientTest(unittest.TestCase, KVStoreMockMixin):
    """Check that GrafanaRestClient honors the SSL-verify setting."""
    # Expected request headers for dashboard pushes.
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
    }
    # Expected JSON body for push_dashboard('foo').
    payload = json.dumps({
        'dashboard': 'foo',
        'overwrite': True
    })
    def setUp(self):
        self.mock_kv_store()
        Settings.GRAFANA_API_URL = 'https://foo/bar'
        Settings.GRAFANA_API_USERNAME = 'xyz'
        Settings.GRAFANA_API_PASSWORD = 'abc'
        Settings.GRAFANA_API_SSL_VERIFY = True
    def test_ssl_verify_url_validation(self):
        """url_validation passes verify=True when SSL verify is on."""
        with patch('requests.request') as mock_request:
            rest_client = GrafanaRestClient()
            rest_client.url_validation('FOO', Settings.GRAFANA_API_URL)
            mock_request.assert_called_with('FOO', Settings.GRAFANA_API_URL,
                                            verify=True)
    def test_no_ssl_verify_url_validation(self):
        """url_validation passes verify=False when SSL verify is off."""
        Settings.GRAFANA_API_SSL_VERIFY = False
        with patch('requests.request') as mock_request:
            rest_client = GrafanaRestClient()
            rest_client.url_validation('BAR', Settings.GRAFANA_API_URL)
            mock_request.assert_called_with('BAR', Settings.GRAFANA_API_URL,
                                            verify=False)
    def test_ssl_verify_push_dashboard(self):
        """push_dashboard POSTs with credentials and verify=True."""
        with patch('requests.post') as mock_request:
            rest_client = GrafanaRestClient()
            rest_client.push_dashboard('foo')
            mock_request.assert_called_with(
                Settings.GRAFANA_API_URL + '/api/dashboards/db',
                auth=(Settings.GRAFANA_API_USERNAME,
                      Settings.GRAFANA_API_PASSWORD),
                data=self.payload, headers=self.headers, verify=True)
    def test_no_ssl_verify_push_dashboard(self):
        """push_dashboard POSTs with verify=False when SSL verify is off."""
        Settings.GRAFANA_API_SSL_VERIFY = False
        with patch('requests.post') as mock_request:
            rest_client = GrafanaRestClient()
            rest_client.push_dashboard('foo')
            mock_request.assert_called_with(
                Settings.GRAFANA_API_URL + '/api/dashboards/db',
                auth=(Settings.GRAFANA_API_USERNAME,
                      Settings.GRAFANA_API_PASSWORD),
                data=self.payload, headers=self.headers, verify=False)
| 4,870 | 35.350746 | 99 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/tests/test_home.py
|
import logging
import os
try:
import mock
except ImportError:
import unittest.mock as mock
from .. import mgr
from ..controllers.home import HomeController, LanguageMixin
from ..tests import ControllerTestCase, FakeFsMixin
# Root logger; the tests below use it to dump response bodies.
logger = logging.getLogger()
class HomeTest(ControllerTestCase, FakeFsMixin):
    """Tests for serving the frontend index page from a fake filesystem,
    including language negotiation via Accept-Language."""
    @classmethod
    def setup_server(cls):
        frontend_path = mgr.get_frontend_path()
        cls.fs.reset()
        cls.fs.create_dir(frontend_path)
        cls.fs.create_file(
            os.path.join(frontend_path, '..', 'package.json'),
            contents='{"config":{"locale": "en"}}')
        # LanguageMixin reads the filesystem while it is constructed, so
        # open()/os.listdir must point at the fake fs during construction.
        with mock.patch(cls.builtins_open, new=cls.f_open),\
                mock.patch('os.listdir', new=cls.f_os.listdir):
            lang = LanguageMixin()
        cls.fs.create_file(
            os.path.join(lang.DEFAULT_LANGUAGE_PATH, 'index.html'),
            contents='<!doctype html><html lang="en"><body></body></html>')
        cls.setup_controllers([HomeController])
    @mock.patch(FakeFsMixin.builtins_open, new=FakeFsMixin.f_open)
    @mock.patch('os.stat', new=FakeFsMixin.f_os.stat)
    @mock.patch('os.listdir', new=FakeFsMixin.f_os.listdir)
    def test_home_default_lang(self):
        """Without Accept-Language the default (en) page is served."""
        self._get('/')
        self.assertStatus(200)
        logger.info(self.body)
        self.assertIn('<html lang="en">', self.body.decode('utf-8'))
    @mock.patch(FakeFsMixin.builtins_open, new=FakeFsMixin.f_open)
    @mock.patch('os.stat', new=FakeFsMixin.f_os.stat)
    @mock.patch('os.listdir', new=FakeFsMixin.f_os.listdir)
    def test_home_uplevel_check(self):
        """Path traversal outside the frontend dir is rejected with 403."""
        self._get('/../../../../../../etc/shadow')
        self.assertStatus(403)
    @mock.patch(FakeFsMixin.builtins_open, new=FakeFsMixin.f_open)
    @mock.patch('os.stat', new=FakeFsMixin.f_os.stat)
    @mock.patch('os.listdir', new=FakeFsMixin.f_os.listdir)
    def test_home_en(self):
        """An explicit en-US request serves the English page."""
        self._get('/', headers=[('Accept-Language', 'en-US')])
        self.assertStatus(200)
        logger.info(self.body)
        self.assertIn('<html lang="en">', self.body.decode('utf-8'))
    @mock.patch(FakeFsMixin.builtins_open, new=FakeFsMixin.f_open)
    @mock.patch('os.stat', new=FakeFsMixin.f_os.stat)
    @mock.patch('os.listdir', new=FakeFsMixin.f_os.listdir)
    def test_home_non_supported_lang(self):
        """An unsupported language falls back to the English page."""
        self._get('/', headers=[('Accept-Language', 'NO-NO')])
        self.assertStatus(200)
        logger.info(self.body)
        self.assertIn('<html lang="en">', self.body.decode('utf-8'))
    @mock.patch(FakeFsMixin.builtins_open, new=FakeFsMixin.f_open)
    @mock.patch('os.stat', new=FakeFsMixin.f_os.stat)
    @mock.patch('os.listdir', new=FakeFsMixin.f_os.listdir)
    def test_home_multiple_subtags_lang(self):
        """A language tag with multiple subtags is accepted."""
        self._get('/', headers=[('Accept-Language', 'zh-Hans-CN')])
        self.assertStatus(200)
| 2,839 | 37.378378 | 79 |
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.