file_name
stringlengths
3
137
prefix
stringlengths
0
918k
suffix
stringlengths
0
962k
middle
stringlengths
0
812k
terminalInstance.ts
/*--------------------------------------------------------------------------------------------- * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for license information. *--------------------------------------------------------------------------------------------*/ import * as path from 'vs/base/common/path'; import * as dom from 'vs/base/browser/dom'; import { StandardKeyboardEvent } from 'vs/base/browser/keyboardEvent'; import { debounce } from 'vs/base/common/decorators'; import { Emitter, Event } from 'vs/base/common/event'; import { KeyCode } from 'vs/base/common/keyCodes'; import { IDisposable, dispose, Disposable, toDisposable } from 'vs/base/common/lifecycle'; import { TabFocus } from 'vs/editor/common/config/commonEditorConfig'; import * as nls from 'vs/nls'; import { IClipboardService } from 'vs/platform/clipboard/common/clipboardService'; import { ConfigurationTarget, IConfigurationService } from 'vs/platform/configuration/common/configuration'; import { IContextKey, IContextKeyService } from 'vs/platform/contextkey/common/contextkey'; import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation'; import { IKeybindingService } from 'vs/platform/keybinding/common/keybinding'; import { ILogService } from 'vs/platform/log/common/log'; import { INotificationService, IPromptChoice, Severity } from 'vs/platform/notification/common/notification'; import { IStorageService, StorageScope, StorageTarget } from 'vs/platform/storage/common/storage'; import { activeContrastBorder, scrollbarSliderActiveBackground, scrollbarSliderBackground, scrollbarSliderHoverBackground } from 'vs/platform/theme/common/colorRegistry'; import { ICssStyleCollector, IColorTheme, IThemeService, registerThemingParticipant } from 'vs/platform/theme/common/themeService'; import { PANEL_BACKGROUND, SIDE_BAR_BACKGROUND } from 'vs/workbench/common/theme'; import { TerminalWidgetManager } 
from 'vs/workbench/contrib/terminal/browser/widgets/widgetManager'; import { ITerminalProcessManager, KEYBINDING_CONTEXT_TERMINAL_TEXT_SELECTED, NEVER_MEASURE_RENDER_TIME_STORAGE_KEY, ProcessState, TERMINAL_VIEW_ID, KEYBINDING_CONTEXT_TERMINAL_A11Y_TREE_FOCUS, INavigationMode, DEFAULT_COMMANDS_TO_SKIP_SHELL, TERMINAL_CREATION_COMMANDS, KEYBINDING_CONTEXT_TERMINAL_ALT_BUFFER_ACTIVE, SUGGESTED_RENDERER_TYPE, ITerminalProfileResolverService } from 'vs/workbench/contrib/terminal/common/terminal'; import { ansiColorIdentifiers, ansiColorMap, TERMINAL_BACKGROUND_COLOR, TERMINAL_CURSOR_BACKGROUND_COLOR, TERMINAL_CURSOR_FOREGROUND_COLOR, TERMINAL_FOREGROUND_COLOR, TERMINAL_SELECTION_BACKGROUND_COLOR } from 'vs/workbench/contrib/terminal/common/terminalColorRegistry'; import { TerminalConfigHelper } from 'vs/workbench/contrib/terminal/browser/terminalConfigHelper'; import { TerminalLinkManager } from 'vs/workbench/contrib/terminal/browser/links/terminalLinkManager'; import { IAccessibilityService } from 'vs/platform/accessibility/common/accessibility'; import { ITerminalInstanceService, ITerminalInstance, ITerminalExternalLinkProvider, IRequestAddInstanceToGroupEvent } from 'vs/workbench/contrib/terminal/browser/terminal'; import { TerminalProcessManager } from 'vs/workbench/contrib/terminal/browser/terminalProcessManager'; import type { Terminal as XTermTerminal, IBuffer, ITerminalAddon, RendererType, ITheme } from 'xterm'; import type { SearchAddon, ISearchOptions } from 'xterm-addon-search'; import type { Unicode11Addon } from 'xterm-addon-unicode11'; import type { WebglAddon } from 'xterm-addon-webgl'; import { CommandTrackerAddon } from 'vs/workbench/contrib/terminal/browser/addons/commandTrackerAddon'; import { NavigationModeAddon } from 'vs/workbench/contrib/terminal/browser/addons/navigationModeAddon'; import { XTermCore } from 'vs/workbench/contrib/terminal/browser/xterm-private'; import { IEditorOptions } from 'vs/editor/common/config/editorOptions'; import { 
IViewsService, IViewDescriptorService, ViewContainerLocation } from 'vs/workbench/common/views'; import { EnvironmentVariableInfoWidget } from 'vs/workbench/contrib/terminal/browser/widgets/environmentVariableInfoWidget'; import { TerminalLaunchHelpAction } from 'vs/workbench/contrib/terminal/browser/terminalActions'; import { TypeAheadAddon } from 'vs/workbench/contrib/terminal/browser/terminalTypeAheadAddon'; import { BrowserFeatures } from 'vs/base/browser/canIUse'; import { IPreferencesService } from 'vs/workbench/services/preferences/common/preferences'; import { IEnvironmentVariableInfo } from 'vs/workbench/contrib/terminal/common/environmentVariable'; import { IProcessDataEvent, IShellLaunchConfig, ITerminalDimensionsOverride, ITerminalLaunchError, TerminalShellType, TerminalSettingId, TitleEventSource, TerminalIcon } from 'vs/platform/terminal/common/terminal'; import { IProductService } from 'vs/platform/product/common/productService'; import { formatMessageForTerminal } from 'vs/workbench/contrib/terminal/common/terminalStrings'; import { AutoOpenBarrier } from 'vs/base/common/async'; import { Codicon, iconRegistry } from 'vs/base/common/codicons'; import { ITerminalStatusList, TerminalStatus, TerminalStatusList } from 'vs/workbench/contrib/terminal/browser/terminalStatusList'; import { IQuickInputService, IQuickPickItem } from 'vs/platform/quickinput/common/quickInput'; import { IWorkbenchEnvironmentService } from 'vs/workbench/services/environment/common/environmentService'; import { isMacintosh, isWindows, OperatingSystem, OS } from 'vs/base/common/platform'; import { URI } from 'vs/base/common/uri'; import { Schemas } from 'vs/base/common/network'; import { DataTransfers } from 'vs/base/browser/dnd'; import { DragAndDropObserver, IDragAndDropObserverCallbacks } from 'vs/workbench/browser/dnd'; // How long in milliseconds should an average frame take to render for a notification to appear // which suggests the fallback DOM-based renderer const 
SLOW_CANVAS_RENDER_THRESHOLD = 50; const NUMBER_OF_FRAMES_TO_MEASURE = 20; const enum Constants { /** * The maximum amount of milliseconds to wait for a container before starting to create the * terminal process. This period helps ensure the terminal has good initial dimensions to work * with if it's going to be a foreground terminal. */ WaitForContainerThreshold = 100 } let xtermConstructor: Promise<typeof XTermTerminal> | undefined; interface ICanvasDimensions { width: number; height: number; } interface IGridDimensions { cols: number; rows: number; } export class TerminalInstance extends Disposable implements ITerminalInstance { private static _lastKnownCanvasDimensions: ICanvasDimensions | undefined; private static _lastKnownGridDimensions: IGridDimensions | undefined; private static _instanceIdCounter = 1; private _processManager!: ITerminalProcessManager; private _pressAnyKeyToCloseListener: IDisposable | undefined; private _instanceId: number; private _latestXtermWriteData: number = 0; private _latestXtermParseData: number = 0; private _isExiting: boolean; private _hadFocusOnExit: boolean; private _isVisible: boolean; private _isDisposed: boolean; private _exitCode: number | undefined; private _skipTerminalCommands: string[]; private _shellType: TerminalShellType; private _title: string = ''; private _titleSource: TitleEventSource = TitleEventSource.Process; private _container: HTMLElement | undefined; private _wrapperElement: (HTMLElement & { xterm?: XTermTerminal }) | undefined; private _xterm: XTermTerminal | undefined; private _xtermCore: XTermCore | undefined; private _xtermTypeAhead: TypeAheadAddon | undefined; private _xtermSearch: SearchAddon | undefined; private _xtermUnicode11: Unicode11Addon | undefined; private _xtermElement: HTMLDivElement | undefined; private _terminalHasTextContextKey: IContextKey<boolean>; private _terminalA11yTreeFocusContextKey: IContextKey<boolean>; private _cols: number = 0; private _rows: number = 0; private 
_dimensionsOverride: ITerminalDimensionsOverride | undefined; private _xtermReadyPromise: Promise<XTermTerminal>; private _titleReadyPromise: Promise<string>; private _titleReadyComplete: ((title: string) => any) | undefined; private _areLinksReady: boolean = false; private _initialDataEvents: string[] | undefined = []; private _containerReadyBarrier: AutoOpenBarrier; private _messageTitleDisposable: IDisposable | undefined; private _widgetManager: TerminalWidgetManager = this._instantiationService.createInstance(TerminalWidgetManager); private _linkManager: TerminalLinkManager | undefined; private _environmentInfo: { widget: EnvironmentVariableInfoWidget, disposable: IDisposable } | undefined; private _webglAddon: WebglAddon | undefined; private _commandTrackerAddon: CommandTrackerAddon | undefined; private _navigationModeAddon: INavigationMode & ITerminalAddon | undefined; private _dndObserver: IDisposable | undefined; private _lastLayoutDimensions: dom.Dimension | undefined; private _hasHadInput: boolean; readonly statusList: ITerminalStatusList = new TerminalStatusList(); disableLayout: boolean = false; get instanceId(): number { return this._instanceId; } get resource(): URI { return URI.from({ scheme: Schemas.vscodeTerminal, path: this.title, fragment: this.instanceId.toString(), }); } get cols(): number { if (this._dimensionsOverride && this._dimensionsOverride.cols) { if (this._dimensionsOverride.forceExactSize) { return this._dimensionsOverride.cols; } return Math.min(Math.max(this._dimensionsOverride.cols, 2), this._cols); } return this._cols; } get rows(): number { if (this._dimensionsOverride && this._dimensionsOverride.rows) { if (this._dimensionsOverride.forceExactSize) { return this._dimensionsOverride.rows; } return Math.min(Math.max(this._dimensionsOverride.rows, 2), this._rows); } return this._rows; } get maxCols(): number { return this._cols; } get maxRows(): number { return this._rows; } // TODO: Ideally processId would be merged into 
processReady get processId(): number | undefined { return this._processManager.shellProcessId; } // TODO: How does this work with detached processes? // TODO: Should this be an event as it can fire twice? get processReady(): Promise<void> { return this._processManager.ptyProcessReady; } get areLinksReady(): boolean { return this._areLinksReady; } get initialDataEvents(): string[] | undefined { return this._initialDataEvents; } get exitCode(): number | undefined { return this._exitCode; } get hadFocusOnExit(): boolean { return this._hadFocusOnExit; } get isTitleSetByProcess(): boolean { return !!this._messageTitleDisposable; } get shellLaunchConfig(): IShellLaunchConfig { return this._shellLaunchConfig; } get shellType(): TerminalShellType { return this._shellType; } get commandTracker(): CommandTrackerAddon | undefined { return this._commandTrackerAddon; } get navigationMode(): INavigationMode | undefined { return this._navigationModeAddon; } get isDisconnected(): boolean { return this._processManager.isDisconnected; } get isRemote(): boolean { return this._processManager.remoteAuthority !== undefined; } get title(): string { return this._title; } get titleSource(): TitleEventSource { return this._titleSource; } get icon(): TerminalIcon | undefined { return this._getIcon(); } get color(): string | undefined { return this._getColor(); } private readonly _onExit = new Emitter<number | undefined>(); get onExit(): Event<number | undefined> { return this._onExit.event; } private readonly _onDisposed = new Emitter<ITerminalInstance>(); get onDisposed(): Event<ITerminalInstance> { return this._onDisposed.event; } private readonly _onFocused = new Emitter<ITerminalInstance>(); get onFocused(): Event<ITerminalInstance> { return this._onFocused.event; } private readonly _onProcessIdReady = new Emitter<ITerminalInstance>(); get onProcessIdReady(): Event<ITerminalInstance> { return this._onProcessIdReady.event; } private readonly _onLinksReady = new 
Emitter<ITerminalInstance>(); get onLinksReady(): Event<ITerminalInstance> { return this._onLinksReady.event; } private readonly _onTitleChanged = new Emitter<ITerminalInstance>(); get onTitleChanged(): Event<ITerminalInstance> { return this._onTitleChanged.event; } private readonly _onIconChanged = new Emitter<ITerminalInstance>(); get onIconChanged(): Event<ITerminalInstance> { return this._onIconChanged.event; } private readonly _onData = new Emitter<string>(); get onData(): Event<string> { return this._onData.event; } private readonly _onBinary = new Emitter<string>(); get onBinary(): Event<string> { return this._onBinary.event; } private readonly _onLineData = new Emitter<string>(); get onLineData(): Event<string> { return this._onLineData.event; } private readonly _onRequestExtHostProcess = new Emitter<ITerminalInstance>(); get onRequestExtHostProcess(): Event<ITerminalInstance> { return this._onRequestExtHostProcess.event; } private readonly _onDimensionsChanged = new Emitter<void>(); get onDimensionsChanged(): Event<void> { return this._onDimensionsChanged.event; } private readonly _onMaximumDimensionsChanged = new Emitter<void>(); get onMaximumDimensionsChanged(): Event<void> { return this._onMaximumDimensionsChanged.event; } private readonly _onFocus = new Emitter<ITerminalInstance>(); get onFocus(): Event<ITerminalInstance> { return this._onFocus.event; } private readonly _onRequestAddInstanceToGroup = new Emitter<IRequestAddInstanceToGroupEvent>(); get onRequestAddInstanceToGroup(): Event<IRequestAddInstanceToGroupEvent> { return this._onRequestAddInstanceToGroup.event; } constructor( private readonly _terminalFocusContextKey: IContextKey<boolean>, private readonly _terminalShellTypeContextKey: IContextKey<string>, private readonly _terminalAltBufferActiveContextKey: IContextKey<boolean>, private readonly _configHelper: TerminalConfigHelper, private _shellLaunchConfig: IShellLaunchConfig, @ITerminalInstanceService private readonly 
_terminalInstanceService: ITerminalInstanceService, @ITerminalProfileResolverService private readonly _terminalProfileResolverService: ITerminalProfileResolverService, @IContextKeyService private readonly _contextKeyService: IContextKeyService, @IKeybindingService private readonly _keybindingService: IKeybindingService, @INotificationService private readonly _notificationService: INotificationService, @IPreferencesService private readonly _preferencesService: IPreferencesService, @IViewsService private readonly _viewsService: IViewsService, @IInstantiationService private readonly _instantiationService: IInstantiationService, @IClipboardService private readonly _clipboardService: IClipboardService, @IThemeService private readonly _themeService: IThemeService, @IConfigurationService private readonly _configurationService: IConfigurationService, @ILogService private readonly _logService: ILogService, @IStorageService private readonly _storageService: IStorageService, @IAccessibilityService private readonly _accessibilityService: IAccessibilityService, @IViewDescriptorService private readonly _viewDescriptorService: IViewDescriptorService, @IProductService private readonly _productService: IProductService, @IQuickInputService private readonly _quickInputService: IQuickInputService, @IWorkbenchEnvironmentService workbenchEnvironmentService: IWorkbenchEnvironmentService ) { super(); this._skipTerminalCommands = []; this._isExiting = false; this._hadFocusOnExit = false; this._isVisible = false; this._isDisposed = false; this._instanceId = TerminalInstance._instanceIdCounter++; this._hasHadInput = false; this._titleReadyPromise = new Promise<string>(c => { this._titleReadyComplete = c; }); this._terminalHasTextContextKey = KEYBINDING_CONTEXT_TERMINAL_TEXT_SELECTED.bindTo(this._contextKeyService); this._terminalA11yTreeFocusContextKey = KEYBINDING_CONTEXT_TERMINAL_A11Y_TREE_FOCUS.bindTo(this._contextKeyService); this._terminalAltBufferActiveContextKey = 
KEYBINDING_CONTEXT_TERMINAL_ALT_BUFFER_ACTIVE.bindTo(this._contextKeyService); this._logService.trace(`terminalInstance#ctor (instanceId: ${this.instanceId})`, this._shellLaunchConfig); // Resolve just the icon ahead of time so that it shows up immediately in the tabs. This is // disabled in remote because this needs to be sync and the OS may differ on the remote // which would result in the wrong profile being selected and the wrong icon being // permanently attached to the terminal. if (!this.shellLaunchConfig.executable && !workbenchEnvironmentService.remoteAuthority) { this._terminalProfileResolverService.resolveIcon(this._shellLaunchConfig, OS); } this._initDimensions(); this._createProcessManager(); this._register(toDisposable(() => this._dndObserver?.dispose())); this._containerReadyBarrier = new AutoOpenBarrier(Constants.WaitForContainerThreshold); this._xtermReadyPromise = this._createXterm(); this._xtermReadyPromise.then(async () => { // Wait for a period to allow a container to be ready await this._containerReadyBarrier.wait(); await this._createProcess(); // Re-establish the title after reconnect if (this.shellLaunchConfig.attachPersistentProcess) { this.setTitle(this.shellLaunchConfig.attachPersistentProcess.title, this.shellLaunchConfig.attachPersistentProcess.titleSource); } }); this.addDisposable(this._configurationService.onDidChangeConfiguration(e => { if (e.affectsConfiguration('terminal.integrated') || e.affectsConfiguration('editor.fastScrollSensitivity') || e.affectsConfiguration('editor.mouseWheelScrollSensitivity') || e.affectsConfiguration('editor.multiCursorModifier')) { this.updateConfig(); // HACK: Trigger another async layout to ensure xterm's CharMeasure is ready to use, // this hack can be removed when https://github.com/xtermjs/xterm.js/issues/702 is // supported. 
this.setVisible(this._isVisible); } if (e.affectsConfiguration(TerminalSettingId.UnicodeVersion)) { this._updateUnicodeVersion(); } if (e.affectsConfiguration('editor.accessibilitySupport')) { this.updateAccessibilitySupport(); } if (e.affectsConfiguration(TerminalSettingId.GpuAcceleration)) { this._storageService.remove(SUGGESTED_RENDERER_TYPE, StorageScope.GLOBAL); } })); // Clear out initial data events after 10 seconds, hopefully extension hosts are up and // running at that point. let initialDataEventsTimeout: number | undefined = window.setTimeout(() => { initialDataEventsTimeout = undefined; this._initialDataEvents = undefined; }, 10000); this._register(toDisposable(() => { if (initialDataEventsTimeout) { window.clearTimeout(initialDataEventsTimeout); } })); } private _getIcon(): TerminalIcon | undefined { const icon = this._shellLaunchConfig.icon || this._shellLaunchConfig.attachPersistentProcess?.icon; if (!icon) { return this._processManager.processState >= ProcessState.Launching ? Codicon.terminal : undefined; } return icon; } private _getColor(): string | undefined { if (this.shellLaunchConfig.color) { return this.shellLaunchConfig.color; } if (this.shellLaunchConfig?.attachPersistentProcess?.color) { return this.shellLaunchConfig.attachPersistentProcess.color; } if (this._processManager.processState >= ProcessState.Launching) { return undefined; } return undefined; } addDisposable(disposable: IDisposable): void { this._register(disposable); } private _initDimensions(): void { // The terminal panel needs to have been created if (!this._container) { return; } const computedStyle = window.getComputedStyle(this._container.parentElement!); const width = parseInt(computedStyle.getPropertyValue('width').replace('px', ''), 10); const height = parseInt(computedStyle.getPropertyValue('height').replace('px', ''), 10); this._evaluateColsAndRows(width, height); } /** * Evaluates and sets the cols and rows of the terminal if possible. 
* @param width The width of the container. * @param height The height of the container. * @return The terminal's width if it requires a layout. */ private _evaluateColsAndRows(width: number, height: number): number | null { // Ignore if dimensions are undefined or 0 if (!width || !height) { this._setLastKnownColsAndRows(); return null; } const dimension = this._getDimension(width, height); if (!dimension) { this._setLastKnownColsAndRows(); return null; } const font = this._configHelper.getFont(this._xtermCore); if (!font.charWidth || !font.charHeight) { this._setLastKnownColsAndRows(); return null; } // Because xterm.js converts from CSS pixels to actual pixels through // the use of canvas, window.devicePixelRatio needs to be used here in // order to be precise. font.charWidth/charHeight alone as insufficient // when window.devicePixelRatio changes. const scaledWidthAvailable = dimension.width * window.devicePixelRatio; const scaledCharWidth = font.charWidth * window.devicePixelRatio + font.letterSpacing; const newCols = Math.max(Math.floor(scaledWidthAvailable / scaledCharWidth), 1); const scaledHeightAvailable = dimension.height * window.devicePixelRatio; const scaledCharHeight = Math.ceil(font.charHeight * window.devicePixelRatio); const scaledLineHeight = Math.floor(scaledCharHeight * font.lineHeight); const newRows = Math.max(Math.floor(scaledHeightAvailable / scaledLineHeight), 1); if (this._cols !== newCols || this._rows !== newRows) { this._cols = newCols; this._rows = newRows; this._fireMaximumDimensionsChanged(); } return dimension.width; } private _setLastKnownColsAndRows(): void { if (TerminalInstance._lastKnownGridDimensions) { this._cols = TerminalInstance._lastKnownGridDimensions.cols; this._rows = TerminalInstance._lastKnownGridDimensions.rows; } } @debounce(50) private _fireMaximumDimensionsChanged(): void { this._onMaximumDimensionsChanged.fire(); } private _getDimension(width: number, height: number): ICanvasDimensions | undefined { // The font 
needs to have been initialized const font = this._configHelper.getFont(this._xtermCore); if (!font || !font.charWidth || !font.charHeight) { return undefined; } if (!this._wrapperElement) { return undefined; } const wrapperElementStyle = getComputedStyle(this._wrapperElement); const marginLeft = parseInt(wrapperElementStyle.marginLeft!.split('px')[0], 10); const marginRight = parseInt(wrapperElementStyle.marginRight!.split('px')[0], 10); const bottom = parseInt(wrapperElementStyle.bottom!.split('px')[0], 10); const innerWidth = width - marginLeft - marginRight; const innerHeight = height - bottom - 1; TerminalInstance._lastKnownCanvasDimensions = new dom.Dimension(innerWidth, innerHeight); return TerminalInstance._lastKnownCanvasDimensions; } get persistentProcessId(): number | undefined { return this._processManager.persistentProcessId; } get shouldPersist(): boolean { return this._processManager.shouldPersist; } private async _getXtermConstructor(): Promise<typeof XTermTerminal> { if (xtermConstructor) { return xtermConstructor; } xtermConstructor = new Promise<typeof XTermTerminal>(async (resolve) => { const Terminal = await this._terminalInstanceService.getXtermConstructor(); // Localize strings Terminal.strings.promptLabel = nls.localize('terminal.integrated.a11yPromptLabel', 'Terminal input'); Terminal.strings.tooMuchOutput = nls.localize('terminal.integrated.a11yTooMuchOutput', 'Too much output to announce, navigate to rows manually to read'); resolve(Terminal); }); return xtermConstructor; } /** * Create xterm.js instance and attach data listeners. 
*/ protected async _createXterm(): Promise<XTermTerminal> { const Terminal = await this._getXtermConstructor(); const font = this._configHelper.getFont(undefined, true); const config = this._configHelper.config; const editorOptions = this._configurationService.getValue<IEditorOptions>('editor'); let xtermRendererType: RendererType; if (config.gpuAcceleration === 'auto') { // Set the builtin renderer to canvas, even when webgl is being used since it's an addon const suggestedRendererType = this._storageService.get(SUGGESTED_RENDERER_TYPE, StorageScope.GLOBAL); xtermRendererType = suggestedRendererType === 'dom' ? 'dom' : 'canvas'; } else { xtermRendererType = config.gpuAcceleration === 'on' ? 'canvas' : 'dom'; } const xterm = new Terminal({ // TODO: Replace null with undefined when https://github.com/xtermjs/xterm.js/issues/3329 is resolved cols: this._cols || null as any, rows: this._rows || null as any, altClickMovesCursor: config.altClickMovesCursor && editorOptions.multiCursorModifier === 'alt', scrollback: config.scrollback, theme: this._getXtermTheme(), drawBoldTextInBrightColors: config.drawBoldTextInBrightColors, fontFamily: font.fontFamily, fontWeight: config.fontWeight, fontWeightBold: config.fontWeightBold, fontSize: font.fontSize, letterSpacing: font.letterSpacing, lineHeight: font.lineHeight, minimumContrastRatio: config.minimumContrastRatio, bellStyle: 'none', macOptionIsMeta: config.macOptionIsMeta, macOptionClickForcesSelection: config.macOptionClickForcesSelection, rightClickSelectsWord: config.rightClickBehavior === 'selectWord', fastScrollModifier: 'alt', fastScrollSensitivity: editorOptions.fastScrollSensitivity, scrollSensitivity: editorOptions.mouseWheelScrollSensitivity, rendererType: xtermRendererType, wordSeparator: config.wordSeparators }); this._xterm = xterm; this._xtermCore = (xterm as any)._core as XTermCore; this._updateUnicodeVersion(); this.updateAccessibilitySupport(); 
this._terminalInstanceService.getXtermSearchConstructor().then(addonCtor => { this._xtermSearch = new addonCtor(); xterm.loadAddon(this._xtermSearch); }); if (this._shellLaunchConfig.initialText) { this._xterm.writeln(this._shellLaunchConfig.initialText); } // Delay the creation of the bell listener to avoid showing the bell when the terminal // starts up or reconnects setTimeout(() => { this._xterm?.onBell(() => { if (this._configHelper.config.enableBell) { this.statusList.add({ id: TerminalStatus.Bell, severity: Severity.Warning, icon: Codicon.bell, tooltip: nls.localize('bellStatus', "Bell") }, this._configHelper.config.bellDuration); } }); }, 1000); this._xterm.onLineFeed(() => this._onLineFeed()); this._xterm.onKey(e => this._onKey(e.key, e.domEvent)); this._xterm.onSelectionChange(async () => this._onSelectionChange()); this._xterm.buffer.onBufferChange(() => this._refreshAltBufferContextKey()); this._processManager.onProcessData(e => this._onProcessData(e)); this._xterm.onData(data => this._processManager.write(data)); this._xterm.onBinary(data => this._processManager.processBinary(data)); this.processReady.then(async () => { if (this._linkManager) { this._linkManager.processCwd = await this._processManager.getInitialCwd(); } }); // Init winpty compat and link handler after process creation as they rely on the // underlying process OS this._processManager.onProcessReady((processTraits) => { if (this._processManager.os === OperatingSystem.Windows) { xterm.setOption('windowsMode', processTraits.requiresWindowsMode || false); // Force line data to be sent when the cursor is moved, the main purpose for // this is because ConPTY will often not do a line feed but instead move the // cursor, in which case we still want to send the current line's data to tasks. 
xterm.parser.registerCsiHandler({ final: 'H' }, () => { this._onCursorMove(); return false; }); } this._linkManager = this._instantiationService.createInstance(TerminalLinkManager, xterm, this._processManager!); this._areLinksReady = true; this._onLinksReady.fire(this); }); this._commandTrackerAddon = new CommandTrackerAddon(); this._xterm.loadAddon(this._commandTrackerAddon); this._register(this._themeService.onDidColorThemeChange(theme => this._updateTheme(xterm, theme))); this._register(this._viewDescriptorService.onDidChangeLocation(({ views }) => { if (views.some(v => v.id === TERMINAL_VIEW_ID)) { this._updateTheme(xterm); } })); this._xtermTypeAhead = this._register(this._instantiationService.createInstance(TypeAheadAddon, this._processManager, this._configHelper)); this._xterm.loadAddon(this._xtermTypeAhead); return xterm; } reattachToElement(container: HTMLElement): void { if (!this._wrapperElement) { throw new Error('The terminal instance has not been attached to a container yet'); } this._wrapperElement.parentNode?.removeChild(this._wrapperElement); this._container = container; this._container.appendChild(this._wrapperElement); } attachToElement(container: HTMLElement): Promise<void> | void { // The container did not change, do nothing if (this._container === container) { return; } // Attach has not occured yet if (!this._wrapperElement) { return this._attachToElement(container); } // The container changed, reattach this._container?.removeChild(this._wrapperElement); this._container = container; this._container.appendChild(this._wrapperElement); setTimeout(() => this._initDragAndDrop(container)); } private async _attachToElement(container: HTMLElement): Promise<void> { if (this._wrapperElement) { throw new Error('The terminal instance has already been attached to a container'); } this._container = container; this._wrapperElement = document.createElement('div'); this._wrapperElement.classList.add('terminal-wrapper'); this._xtermElement = 
document.createElement('div'); this._wrapperElement.appendChild(this._xtermElement); this._container.appendChild(this._wrapperElement); const xterm = await this._xtermReadyPromise; // Attach the xterm object to the DOM, exposing it to the smoke tests this._wrapperElement.xterm = this._xterm; xterm.open(this._xtermElement); const suggestedRendererType = this._storageService.get(SUGGESTED_RENDERER_TYPE, StorageScope.GLOBAL); if (this._configHelper.config.gpuAcceleration === 'auto' && (suggestedRendererType === 'auto' || suggestedRendererType === undefined) || this._configHelper.config.gpuAcceleration === 'on') { this._enableWebglRenderer(); } if (!xterm.element || !xterm.textarea) { throw new Error('xterm elements not set after open'); } this._setAriaLabel(xterm, this._instanceId, this._title); xterm.textarea.addEventListener('focus', () => this._onFocus.fire(this)); xterm.attachCustomKeyEventHandler((event: KeyboardEvent): boolean => { // Disable all input if the terminal is exiting if (this._isExiting) { return false; } const standardKeyboardEvent = new StandardKeyboardEvent(event); const resolveResult = this._keybindingService.softDispatch(standardKeyboardEvent, standardKeyboardEvent.target); // Respect chords if the allowChords setting is set and it's not Escape. 
Escape is
// handled specially for Zen Mode's Escape, Escape chord, plus it's important in
// terminals generally
const isValidChord = resolveResult?.enterChord && this._configHelper.config.allowChords && event.key !== 'Escape';
if (this._keybindingService.inChordMode || isValidChord) {
	event.preventDefault();
	return false;
}

const SHOW_TERMINAL_CONFIG_PROMPT_KEY = 'terminal.integrated.showTerminalConfigPrompt';
const EXCLUDED_KEYS = ['RightArrow', 'LeftArrow', 'UpArrow', 'DownArrow', 'Space', 'Meta', 'Control', 'Shift', 'Alt', '', 'Delete', 'Backspace', 'Tab'];

// only keep track of input if prompt hasn't already been shown
if (this._storageService.getBoolean(SHOW_TERMINAL_CONFIG_PROMPT_KEY, StorageScope.GLOBAL, true) &&
	!EXCLUDED_KEYS.includes(event.key) &&
	!event.ctrlKey &&
	!event.shiftKey &&
	!event.altKey) {
	this._hasHadInput = true;
}

// for keyboard events that resolve to commands described
// within commandsToSkipShell, either alert or skip processing by xterm.js
if (resolveResult && resolveResult.commandId && this._skipTerminalCommands.some(k => k === resolveResult.commandId) && !this._configHelper.config.sendKeybindingsToShell) {
	// don't alert when terminal is opened or closed
	if (this._storageService.getBoolean(SHOW_TERMINAL_CONFIG_PROMPT_KEY, StorageScope.GLOBAL, true) &&
		this._hasHadInput &&
		!TERMINAL_CREATION_COMMANDS.includes(resolveResult.commandId)) {
		// One-time notification explaining that this keybinding is intercepted by the
		// workbench rather than sent to the shell; disabled via storage once shown.
		this._notificationService.prompt(
			Severity.Info,
			nls.localize('keybindingHandling', "Some keybindings don't go to the terminal by default and are handled by {0} instead.", this._productService.nameLong),
			[
				{
					label: nls.localize('configureTerminalSettings', "Configure Terminal Settings"),
					run: () => {
						this._preferencesService.openSettings(false, `@id:${TerminalSettingId.CommandsToSkipShell},${TerminalSettingId.SendKeybindingsToShell},${TerminalSettingId.AllowChords}`);
					}
				} as IPromptChoice
			]
		);
		this._storageService.store(SHOW_TERMINAL_CONFIG_PROMPT_KEY, false, StorageScope.GLOBAL, StorageTarget.USER);
	}
	// Returning false stops xterm.js from processing the event so the workbench
	// keybinding can run instead.
	event.preventDefault();
	return false;
}

// Skip processing by xterm.js of keyboard events that match menu bar mnemonics
if (this._configHelper.config.allowMnemonics && !isMacintosh && event.altKey) {
	return false;
}

// If tab focus mode is on, tab is not passed to the terminal
if (TabFocus.getTabFocusMode() && event.keyCode === 9) {
	return false;
}

// Always have alt+F4 skip the terminal on Windows and allow it to be handled by the
// system
if (isWindows && event.altKey && event.key === 'F4' && !event.ctrlKey) {
	return false;
}

// Fallback to force ctrl+v to paste on browsers that do not support
// navigator.clipboard.readText
if (!BrowserFeatures.clipboard.readText && event.key === 'v' && event.ctrlKey) {
	return false;
}

return true;
});

this._register(dom.addDisposableListener(xterm.element, 'mousedown', () => {
	// We need to listen to the mouseup event on the document since the user may release
	// the mouse button anywhere outside of _xterm.element.
	const listener = dom.addDisposableListener(document, 'mouseup', () => {
		// Delay with a setTimeout to allow the mouseup to propagate through the DOM
		// before evaluating the new selection state.
		setTimeout(() => this._refreshSelectionContextKey(), 0);
		listener.dispose();
	});
}));

// xterm.js currently drops selection on keyup as we need to handle this case.
this._register(dom.addDisposableListener(xterm.element, 'keyup', () => {
	// Wait until keyup has propagated through the DOM before evaluating
	// the new selection state.
	setTimeout(() => this._refreshSelectionContextKey(), 0);
}));

// Track focus/blur on the xterm textarea to keep the terminal focus and shell type
// context keys (used by keybinding `when` clauses) in sync.
this._register(dom.addDisposableListener(xterm.textarea, 'focus', () => {
	this._terminalFocusContextKey.set(true);
	if (this.shellType) {
		this._terminalShellTypeContextKey.set(this.shellType.toString());
	} else {
		this._terminalShellTypeContextKey.reset();
	}
	this._onFocused.fire(this);
}));
this._register(dom.addDisposableListener(xterm.textarea, 'blur', () => {
	this._terminalFocusContextKey.reset();
	this._refreshSelectionContextKey();
}));

this._initDragAndDrop(container);

this._widgetManager.attachToElement(xterm.element);
this._processManager.onProcessReady(() => this._linkManager?.setWidgetManager(this._widgetManager));

// const computedStyle = window.getComputedStyle(this._container);
// const computedStyle = window.getComputedStyle(this._container.parentElement!);
// const width = parseInt(computedStyle.getPropertyValue('width').replace('px', ''), 10);
// const height = parseInt(computedStyle.getPropertyValue('height').replace('px', ''), 10);
if (this._lastLayoutDimensions) {
	this.layout(this._lastLayoutDimensions);
}
this.setVisible(this._isVisible);
this.updateConfig();

// If IShellLaunchConfig.waitOnExit was true and the process finished before the terminal
// panel was initialized.
if (xterm.getOption('disableStdin')) {
	this._attachPressAnyKeyToCloseListener(xterm);
}
}

// Sets up drag-and-drop on the terminal container: dropping another terminal fires a
// group-add request; dropping a file sends its (shell-escaped) path as input.
// Any previous observer is disposed first so re-attaching does not leak listeners.
private _initDragAndDrop(container: HTMLElement) {
	this._dndObserver?.dispose();
	const dndController = new TerminalInstanceDropAndDropController(container);
	dndController.onDropTerminal(e => this._onRequestAddInstanceToGroup.fire(e));
	dndController.onDropFile(async path => {
		const preparedPath = await this._terminalInstanceService.preparePathForTerminalAsync(path, this.shellLaunchConfig.executable, this.title, this.shellType, this.isRemote);
		this.sendText(preparedPath, false);
		this.focus();
	});
	this._dndObserver = new DragAndDropObserver(container.parentElement!, dndController);
}

// Measures canvas render time by wrapping the text render layer's onGridChanged and
// timing NUMBER_OF_FRAMES_TO_MEASURE frames; if the median exceeds
// SLOW_CANVAS_RENDER_THRESHOLD it either switches the suggested renderer to DOM
// (gpuAcceleration === 'auto') or prompts the user to disable GPU acceleration.
// NOTE(review): reaches into xterm.js internals (_renderService._renderer) — fragile
// across xterm upgrades.
private async _measureRenderTime(): Promise<void> {
	await this._xtermReadyPromise;
	const frameTimes: number[] = [];
	const textRenderLayer = this._xtermCore!._renderService._renderer._renderLayers[0];
	const originalOnGridChanged = textRenderLayer.onGridChanged;
	const evaluateCanvasRenderer = () => {
		// Discard first frame time as it's normal to take longer
		frameTimes.shift();
		const medianTime = frameTimes.sort((a, b) => a - b)[Math.floor(frameTimes.length / 2)];
		if (medianTime > SLOW_CANVAS_RENDER_THRESHOLD) {
			if (this._configHelper.config.gpuAcceleration === 'auto') {
				this._storageService.store(SUGGESTED_RENDERER_TYPE, 'dom', StorageScope.GLOBAL, StorageTarget.MACHINE);
				this.updateConfig();
			} else {
				const promptChoices: IPromptChoice[] = [
					{
						label: nls.localize('yes', "Yes"),
						run: () => this._configurationService.updateValue(TerminalSettingId.GpuAcceleration, 'off', ConfigurationTarget.USER)
					} as IPromptChoice,
					{
						label: nls.localize('no', "No"),
						run: () => { }
					} as IPromptChoice,
					{
						label: nls.localize('dontShowAgain', "Don't Show Again"),
						isSecondary: true,
						run: () => this._storageService.store(NEVER_MEASURE_RENDER_TIME_STORAGE_KEY, true, StorageScope.GLOBAL, StorageTarget.MACHINE)
					} as IPromptChoice
				];
				this._notificationService.prompt(
					Severity.Warning,
					nls.localize('terminal.slowRendering',
'Terminal GPU acceleration appears to be slow on your computer. Would you like to switch to disable it which may improve performance? [Read more about terminal settings](https://code.visualstudio.com/docs/editor/integrated-terminal#_changing-how-the-terminal-is-rendered).'),
					promptChoices
				);
			}
		}
	};

	// Wrap onGridChanged to time each frame; the wrapper uninstalls itself once enough
	// frames have been collected.
	textRenderLayer.onGridChanged = (terminal: XTermTerminal, firstRow: number, lastRow: number) => {
		const startTime = performance.now();
		originalOnGridChanged.call(textRenderLayer, terminal, firstRow, lastRow);
		frameTimes.push(performance.now() - startTime);
		if (frameTimes.length === NUMBER_OF_FRAMES_TO_MEASURE) {
			evaluateCanvasRenderer();
			// Restore original function
			textRenderLayer.onGridChanged = originalOnGridChanged;
		}
	};
}

// Whether the terminal currently has a selection; false before xterm is created.
hasSelection(): boolean {
	return this._xterm ? this._xterm.hasSelection() : false;
}

// Copies the current selection to the clipboard, warning the user when nothing is
// selected.
async copySelection(): Promise<void> {
	const xterm = await this._xtermReadyPromise;
	if (this.hasSelection()) {
		await this._clipboardService.writeText(xterm.getSelection());
	} else {
		this._notificationService.warn(nls.localize('terminal.integrated.copySelection.noSelection', 'The terminal has no selection to copy'));
	}
}

// The selected text, or undefined when xterm is not ready or nothing is selected.
get selection(): string | undefined {
	return this._xterm && this.hasSelection() ?
this._xterm.getSelection() : undefined;
}

clearSelection(): void {
	this._xterm?.clearSelection();
}

selectAll(): void {
	// Focus here to ensure the terminal context key is set
	this._xterm?.focus();
	this._xterm?.selectAll();
}

// Delegates find-next to the xterm search addon; false when the addon isn't loaded.
findNext(term: string, searchOptions: ISearchOptions): boolean {
	if (!this._xtermSearch) {
		return false;
	}
	return this._xtermSearch.findNext(term, searchOptions);
}

// Delegates find-previous to the xterm search addon; false when the addon isn't loaded.
findPrevious(term: string, searchOptions: ISearchOptions): boolean {
	if (!this._xtermSearch) {
		return false;
	}
	return this._xtermSearch.findPrevious(term, searchOptions);
}

// Keeps the terminal-focused context key accurate while the find widget takes focus.
notifyFindWidgetFocusChanged(isFocused: boolean): void {
	if (!this._xterm) {
		return;
	}
	const terminalFocused = !isFocused && (document.activeElement === this._xterm.textarea || document.activeElement === this._xterm.element);
	this._terminalFocusContextKey.set(terminalFocused);
}

// Tracks whether the alternate screen buffer is active (e.g. full-screen TUI apps).
private _refreshAltBufferContextKey() {
	this._terminalAltBufferActiveContextKey.set(!!(this._xterm && this._xterm.buffer.active === this._xterm.buffer.alternate));
}

// Tears down the instance: link/command-tracker addons, widget manager, DOM wrapper,
// xterm itself (flushing the final line first), key listeners and the process manager.
// @param immediate passed through to the process manager's dispose.
override dispose(immediate?: boolean): void {
	this._logService.trace(`terminalInstance#dispose (instanceId: ${this.instanceId})`);

	dispose(this._linkManager);
	this._linkManager = undefined;
	dispose(this._commandTrackerAddon);
	this._commandTrackerAddon = undefined;
	dispose(this._widgetManager);

	// Remember focus state so a restored terminal can re-focus after relaunch.
	if (this._xterm && this._xterm.element) {
		this._hadFocusOnExit = this._xterm.element.classList.contains('focus');
	}
	if (this._wrapperElement) {
		if (this._wrapperElement.xterm) {
			this._wrapperElement.xterm = undefined;
		}
		if (this._wrapperElement.parentElement && this._container) {
			this._container.removeChild(this._wrapperElement);
		}
	}
	if (this._xterm) {
		// Emit the cursor line before the buffer is destroyed.
		const buffer = this._xterm.buffer;
		this._sendLineData(buffer.active, buffer.active.baseY + buffer.active.cursorY);
		this._xterm.dispose();
	}

	if (this._pressAnyKeyToCloseListener) {
		this._pressAnyKeyToCloseListener.dispose();
		this._pressAnyKeyToCloseListener = undefined;
	}

	this._processManager.dispose(immediate);
	// Process manager dispose/shutdown doesn't fire process exit, trigger with undefined if it
	// hasn't happened yet
	this._onProcessExit(undefined);

	if (!this._isDisposed) {
		this._isDisposed = true;
		this._onDisposed.fire(this);
	}
	super.dispose();
}

// Detaches the backing process so it keeps running after this instance is gone.
detachFromProcess(): void {
	this._processManager.detachFromProcess();
}

forceRedraw(): void {
	if (!this._xterm) {
		return;
	}
	this._webglAddon?.clearTextureAtlas();
	// TODO: Do canvas renderer too?
}

// Focuses the terminal. Without `force`, focus is skipped while a DOM text selection
// exists so the user can still copy it.
focus(force?: boolean): void {
	this._refreshAltBufferContextKey();
	if (!this._xterm) {
		return;
	}
	const selection = window.getSelection();
	if (!selection) {
		return;
	}
	const text = selection.toString();
	if (!text || force) {
		this._xterm.focus();
	}
}

async focusWhenReady(force?: boolean): Promise<void> {
	await this._xtermReadyPromise;
	this.focus(force);
}

async paste(): Promise<void> {
	if (!this._xterm) {
		return;
	}
	this.focus();
	this._xterm.paste(await this._clipboardService.readText());
}

// Pastes from the primary selection clipboard (X11 middle-click buffer).
async pasteSelection(): Promise<void> {
	if (!this._xterm) {
		return;
	}
	this.focus();
	this._xterm.paste(await this._clipboardService.readText('selection'));
}

// Writes text to the process, converting newlines to carriage returns and optionally
// appending one so the shell treats the input as submitted.
async sendText(text: string, addNewLine: boolean): Promise<void> {
	// Normalize line endings to 'enter' press.
	text = text.replace(/\r?\n/g, '\r');
	if (addNewLine && text.substr(text.length - 1) !== '\r') {
		text += '\r';
	}

	// Send it to the process
	return this._processManager.write(text);
}

setVisible(visible: boolean): void {
	this._isVisible = visible;
	if (this._wrapperElement) {
		this._wrapperElement.classList.toggle('active', visible);
	}
	if (visible && this._xterm && this._xtermCore) {
		// Trigger a manual scroll event which will sync the viewport and scroll bar. This is
		// necessary if the number of rows in the terminal has decreased while it was in the
		// background since scrollTop changes take no effect but the terminal's position does
		// change since the number of visible rows decreases.
		// This can likely be removed after https://github.com/xtermjs/xterm.js/issues/291 is
		// fixed upstream.
		this._xtermCore._onScroll.fire(this._xterm.buffer.active.viewportY);
	}
}

scrollDownLine(): void {
	this._xterm?.scrollLines(1);
}

scrollDownPage(): void {
	this._xterm?.scrollPages(1);
}

scrollToBottom(): void {
	this._xterm?.scrollToBottom();
}

scrollUpLine(): void {
	this._xterm?.scrollLines(-1);
}

scrollUpPage(): void {
	this._xterm?.scrollPages(-1);
}

scrollToTop(): void {
	this._xterm?.scrollToTop();
}

clear(): void {
	this._xterm?.clear();
}

// "Terminal has text" context key is only set while the terminal view is active AND a
// selection exists.
private _refreshSelectionContextKey() {
	const isActive = !!this._viewsService.getActiveViewWithId(TERMINAL_VIEW_ID);
	this._terminalHasTextContextKey.set(isActive && this.hasSelection());
}

// Creates the process manager and wires all of its events (title, data, exit, resolved
// launch config, env changes, shell type, pty connect/disconnect) to this instance.
protected _createProcessManager(): void {
	this._processManager = this._instantiationService.createInstance(TerminalProcessManager, this._instanceId, this._configHelper);
	this._processManager.onProcessReady(() => {
		this._onProcessIdReady.fire(this);
		// Set the initial name based on the _resolved_ shell launch config, this will also
		// ensure the resolved icon gets shown
		if (this._shellLaunchConfig.name) {
			this.setTitle(this._shellLaunchConfig.name, TitleEventSource.Api);
		} else {
			// Only listen for process title changes when a name is not provided
			if (this._configHelper.config.titleMode === 'sequence') {
				// Set the title to the first event if the sequence hasn't set it yet
				Event.once(this._processManager.onProcessTitle)(e => {
					if (!this._title) {
						this.setTitle(e, TitleEventSource.Sequence);
					}
				});
				// Listen to xterm.js' sequence title change event, trigger this async to ensure
				// _xtermReadyPromise is ready constructed since this is called from the ctor
				setTimeout(() => {
					this._xtermReadyPromise.then(xterm => {
						this._messageTitleDisposable = xterm.onTitleChange(e => this._onTitleChange(e));
					});
				});
			} else {
				this.setTitle(this._shellLaunchConfig.executable, TitleEventSource.Process);
				this._messageTitleDisposable = this._processManager.onProcessTitle(title => this.setTitle(title ? title : '', TitleEventSource.Process));
			}
		}
	});
	this._processManager.onProcessExit(exitCode => this._onProcessExit(exitCode));
	this._processManager.onProcessData(ev => {
		// _initialDataEvents buffers early output (e.g. for terminal reconnection); it is
		// optional because it is cleared once no longer needed.
		this._initialDataEvents?.push(ev.data);
		this._onData.fire(ev.data);
	});
	this._processManager.onProcessOverrideDimensions(e => this.setDimensions(e, true));
	this._processManager.onProcessResolvedShellLaunchConfig(e => this._setResolvedShellLaunchConfig(e));
	this._processManager.onEnvironmentVariableInfoChanged(e => this._onEnvironmentVariableInfoChanged(e));
	this._processManager.onProcessShellTypeChanged(type => this.setShellType(type));
	// On pty disconnect, freeze input and surface a status; undo both on reconnect.
	this._processManager.onPtyDisconnect(() => {
		this._safeSetOption('disableStdin', true);
		this.statusList.add({
			id: TerminalStatus.Disconnected,
			severity: Severity.Error,
			icon: Codicon.debugDisconnect,
			tooltip: nls.localize('disconnectStatus', "Lost connection to process")
		});
	});
	this._processManager.onPtyReconnect(() => {
		this._safeSetOption('disableStdin', false);
		this.statusList.remove(TerminalStatus.Disconnected);
	});
}

// Starts the backing process unless this instance was disposed first; launch errors are
// routed through the normal process-exit path.
private async _createProcess(): Promise<void> {
	if (this._isDisposed) {
		return;
	}
	await this._processManager.createProcess(this._shellLaunchConfig, this._cols, this._rows, this._accessibilityService.isScreenReaderOptimized()).then(error => {
		if (error) {
			this._onProcessExit(error);
		}
	});
}

// Writes process output to xterm and acknowledges it for flow control once parsed.
// messageId pairs writes with parses so _flushXtermData can tell when xterm caught up.
private _onProcessData(ev: IProcessDataEvent): void {
	const messageId = ++this._latestXtermWriteData;
	if (ev.trackCommit) {
		// Caller wants a promise that resolves when xterm has finished parsing this data.
		ev.writePromise = new Promise<void>(r => {
			this._xterm?.write(ev.data, () => {
				this._latestXtermParseData = messageId;
				this._processManager.acknowledgeDataEvent(ev.data.length);
				r();
			});
		});
	} else {
		this._xterm?.write(ev.data, () => {
			this._latestXtermParseData = messageId;
			this._processManager.acknowledgeDataEvent(ev.data.length);
		});
	}
}

/**
 * Called when either a process tied to a terminal has exited or when a terminal renderer
 * simulates a process exiting (e.g. custom execution task).
 * @param exitCode The exit code of the process, this is undefined when the terminal was exited
 * through user action.
 */
private async _onProcessExit(exitCodeOrError?: number | ITerminalLaunchError): Promise<void> {
	// Prevent dispose functions being triggered multiple times
	if (this._isExiting) {
		return;
	}
	this._isExiting = true;

	// Let xterm finish parsing pending output before reading/printing exit state.
	await this._flushXtermData();
	this._logService.debug(`Terminal process exit (instanceId: ${this.instanceId}) with code ${this._exitCode}`);

	let exitCodeMessage: string | undefined;

	// Create exit code message
	switch (typeof exitCodeOrError) {
		case 'number':
			// Only show the error if the exit code is non-zero
			this._exitCode = exitCodeOrError;
			if (this._exitCode === 0) {
				break;
			}

			let commandLine: string | undefined = undefined;
			if (this._shellLaunchConfig.executable) {
				commandLine = this._shellLaunchConfig.executable;
				if (typeof this._shellLaunchConfig.args === 'string') {
					commandLine += ` ${this._shellLaunchConfig.args}`;
				} else if (this._shellLaunchConfig.args && this._shellLaunchConfig.args.length) {
					// NOTE(review): `.join()` with no argument inserts ',' between elements,
					// so args render as "exe 'a', 'b'" — `.join('')` was likely intended since
					// each mapped element already carries a leading space. Confirm and fix.
					commandLine += this._shellLaunchConfig.args.map(a => ` '${a}'`).join();
				}
			}
			if (this._processManager.processState === ProcessState.KilledDuringLaunch) {
				if (commandLine) {
					exitCodeMessage = nls.localize('launchFailed.exitCodeAndCommandLine', "The terminal process \"{0}\" failed to launch (exit code: {1}).", commandLine, this._exitCode);
					break;
				}
				exitCodeMessage = nls.localize('launchFailed.exitCodeOnly', "The terminal process failed to launch (exit code: {0}).", this._exitCode);
				break;
			}
			if (commandLine) {
				exitCodeMessage = nls.localize('terminated.exitCodeAndCommandLine', "The terminal process \"{0}\" terminated with exit code: {1}.", commandLine, this._exitCode);
				break;
			}
			exitCodeMessage = nls.localize('terminated.exitCodeOnly', "The terminal process terminated with exit code: {0}.", this._exitCode);
			break;
		case 'object':
			// Launch error object rather than a plain exit code.
			this._exitCode = exitCodeOrError.code;
			exitCodeMessage = nls.localize('launchFailed.errorMessage', "The terminal process failed to launch: {0}.", exitCodeOrError.message);
			break;
	}

	this._logService.debug(`Terminal process exit (instanceId: ${this.instanceId}) state ${this._processManager.processState}`);

	// Only trigger wait on exit when the exit was *not* triggered by the
	// user (via the `workbench.action.terminal.kill` command).
	if (this._shellLaunchConfig.waitOnExit && this._processManager.processState !== ProcessState.KilledByUser) {
		this._xtermReadyPromise.then(xterm => {
			if (exitCodeMessage) {
				xterm.writeln(exitCodeMessage);
			}
			if (typeof this._shellLaunchConfig.waitOnExit === 'string') {
				xterm.write(formatMessageForTerminal(this._shellLaunchConfig.waitOnExit));
			}
			// Disable all input if the terminal is exiting and listen for next keypress
			xterm.setOption('disableStdin', true);
			if (xterm.textarea) {
				this._attachPressAnyKeyToCloseListener(xterm);
			}
		});
	} else {
		this.dispose();
		if (exitCodeMessage) {
			const failedDuringLaunch = this._processManager.processState === ProcessState.KilledDuringLaunch;
			if (failedDuringLaunch || this._configHelper.config.showExitAlert) {
				// Always show launch failures
				this._notificationService.notify({
					message: exitCodeMessage,
					severity: Severity.Error,
					actions: { primary: [this._instantiationService.createInstance(TerminalLaunchHelpAction)] }
				});
			} else {
				// Log to help surface the error in case users report issues with showExitAlert
				// disabled
				this._logService.warn(exitCodeMessage);
			}
		}
	}

	this._onExit.fire(this._exitCode);
}

/**
 * Ensure write calls to xterm.js have finished before resolving.
 */
// Polls every 20ms (max 5 tries, ~100ms) for the write/parse counters maintained by
// _onProcessData to converge; gives up after the retry budget rather than hanging.
private _flushXtermData(): Promise<void> {
	if (this._latestXtermWriteData === this._latestXtermParseData) {
		return Promise.resolve();
	}
	let retries = 0;
	return new Promise<void>(r => {
		const interval = setInterval(() => {
			if (this._latestXtermWriteData === this._latestXtermParseData || ++retries === 5) {
				clearInterval(interval);
				r();
			}
		}, 20);
	});
}

// After the process has exited with waitOnExit, any keypress disposes the terminal.
// Guarded so only one listener exists at a time.
private _attachPressAnyKeyToCloseListener(xterm: XTermTerminal) {
	if (xterm.textarea && !this._pressAnyKeyToCloseListener) {
		this._pressAnyKeyToCloseListener = dom.addDisposableListener(xterm.textarea, 'keypress', (event: KeyboardEvent) => {
			if (this._pressAnyKeyToCloseListener) {
				this._pressAnyKeyToCloseListener.dispose();
				this._pressAnyKeyToCloseListener = undefined;
				this.dispose();
				event.preventDefault();
			}
		});
	}
}

// Reuses this terminal for a new shell launch config (optionally resetting the buffer),
// relaunching the backing process in place.
reuseTerminal(shell: IShellLaunchConfig, reset: boolean = false): void {
	// Unsubscribe any key listener we may have.
	this._pressAnyKeyToCloseListener?.dispose();
	this._pressAnyKeyToCloseListener = undefined;

	if (this._xterm) {
		if (!reset) {
			// Ensure new processes' output starts at start of new line
			this._xterm.write('\n\x1b[G');
		}

		// Print initialText if specified
		if (shell.initialText) {
			this._xterm.writeln(shell.initialText);
		}

		// Clean up waitOnExit state
		if (this._isExiting && this._shellLaunchConfig.waitOnExit) {
			this._xterm.setOption('disableStdin', false);
			this._isExiting = false;
		}
	}

	// Dispose the environment info widget if it exists
	this.statusList.remove(TerminalStatus.RelaunchNeeded);
	this._environmentInfo?.disposable.dispose();
	this._environmentInfo = undefined;

	if (!reset) {
		// HACK: Force initialText to be non-falsy for reused terminals such that the
		// conptyInheritCursor flag is passed to the node-pty, this flag can cause a Window to stop
		// responding in Windows 10 1903 so we only want to use it when something is definitely written
		// to the terminal.
		shell.initialText = ' ';
	}

	// Set the new shell launch config
	this._shellLaunchConfig = shell; // Must be done before calling _createProcess()

	this._processManager.relaunch(this._shellLaunchConfig, this._cols, this._rows, this._accessibilityService.isScreenReaderOptimized(), reset);

	// Set title again as when creating the first process
	if (this._shellLaunchConfig.name) {
		this.setTitle(this._shellLaunchConfig.name, TitleEventSource.Api);
	}

	this._xtermTypeAhead?.reset();
}

// Debounced full relaunch with the current launch config and a buffer reset.
@debounce(1000)
relaunch(): void {
	this.reuseTerminal(this._shellLaunchConfig, true);
}

// On line feed, emit the just-completed line (the one above the cursor) unless it is a
// wrapped continuation of a longer logical line.
private _onLineFeed(): void {
	const buffer = this._xterm!.buffer;
	const newLine = buffer.active.getLine(buffer.active.baseY + buffer.active.cursorY);
	if (newLine && !newLine.isWrapped) {
		this._sendLineData(buffer.active, buffer.active.baseY + buffer.active.cursorY - 1);
	}
}

// Emit the current cursor line whenever the cursor moves.
private _onCursorMove(): void {
	const buffer = this._xterm!.buffer;
	this._sendLineData(buffer.active, buffer.active.baseY + buffer.active.cursorY);
}

// Forwards xterm escape-sequence title changes, unless a title was set another way.
private _onTitleChange(title: string): void {
	if (this.isTitleSetByProcess) {
		this.setTitle(title, TitleEventSource.Sequence);
	}
}

// Reconstructs the full logical line (walking back over wrapped rows) at lineIndex and
// fires it on _onLineData.
private _sendLineData(buffer: IBuffer, lineIndex: number): void {
	let line = buffer.getLine(lineIndex);
	if (!line) {
		return;
	}
	let lineData = line.translateToString(true);
	while (lineIndex > 0 && line.isWrapped) {
		line = buffer.getLine(--lineIndex);
		if (!line) {
			break;
		}
		lineData = line.translateToString(false) + lineData;
	}
	this._onLineData.fire(lineData);
}

// Pressing Enter likely ran a command that may have changed the cwd — refresh it.
private _onKey(key: string, ev: KeyboardEvent): void {
	const event = new StandardKeyboardEvent(ev);

	if (event.equals(KeyCode.Enter)) {
		this._updateProcessCwd();
	}
}

// Implements the copyOnSelection setting.
private async _onSelectionChange(): Promise<void> {
	if (this._configurationService.getValue(TerminalSettingId.CopyOnSelection)) {
		if (this.hasSelection()) {
			await this.copySelection();
		}
	}
}

@debounce(2000)
private async _updateProcessCwd(): Promise<string> {
	// reset cwd if it has changed, so file based url paths can be resolved
	const cwd = await
this.getCwd();
	if (cwd && this._linkManager) {
		// Link manager resolves relative file paths in output against the cwd.
		this._linkManager.processCwd = cwd;
	}
	return cwd;
}

// Pushes all user settings from the config helper into xterm options and decides which
// renderer (webgl / canvas / dom) to use.
updateConfig(): void {
	const config = this._configHelper.config;
	// NOTE(review): 'altClickMovesCursor' is set here and then set again below combined
	// with the editor's multiCursorModifier — this first call appears redundant; confirm
	// before removing.
	this._safeSetOption('altClickMovesCursor', config.altClickMovesCursor);
	this._setCursorBlink(config.cursorBlinking);
	this._setCursorStyle(config.cursorStyle);
	this._setCursorWidth(config.cursorWidth);
	this._setCommandsToSkipShell(config.commandsToSkipShell);
	this._safeSetOption('scrollback', config.scrollback);
	this._safeSetOption('minimumContrastRatio', config.minimumContrastRatio);
	this._safeSetOption('fastScrollSensitivity', config.fastScrollSensitivity);
	this._safeSetOption('scrollSensitivity', config.mouseWheelScrollSensitivity);
	this._safeSetOption('macOptionIsMeta', config.macOptionIsMeta);
	const editorOptions = this._configurationService.getValue<IEditorOptions>('editor');
	// Alt+click cursor moves only apply when alt is also the editor multi-cursor modifier.
	this._safeSetOption('altClickMovesCursor', config.altClickMovesCursor && editorOptions.multiCursorModifier === 'alt');
	this._safeSetOption('macOptionClickForcesSelection', config.macOptionClickForcesSelection);
	this._safeSetOption('rightClickSelectsWord', config.rightClickBehavior === 'selectWord');
	this._safeSetOption('wordSeparator', config.wordSeparators);
	const suggestedRendererType = this._storageService.get(SUGGESTED_RENDERER_TYPE, StorageScope.GLOBAL);
	// 'auto' uses webgl until a slow-render measurement has suggested otherwise.
	if ((config.gpuAcceleration === 'auto' && suggestedRendererType === undefined) || config.gpuAcceleration === 'on') {
		this._enableWebglRenderer();
	} else {
		this._disposeOfWebglRenderer();
		this._safeSetOption('rendererType', (config.gpuAcceleration === 'auto' && suggestedRendererType === 'dom') ? 'dom' : (config.gpuAcceleration === 'off' ? 'dom' : 'canvas'));
	}
	this._refreshEnvironmentVariableInfoWidgetState(this._processManager.environmentVariableInfo);
}

// Loads the webgl addon, falling back to the canvas renderer (and possibly measuring
// render time) if webgl is unavailable; stores the outcome as the suggested renderer.
private async _enableWebglRenderer(): Promise<void> {
	if (!this._xterm || this._webglAddon) {
		return;
	}
	const Addon = await this._terminalInstanceService.getXtermWebglConstructor();
	this._webglAddon = new Addon();
	try {
		this._xterm.loadAddon(this._webglAddon);
		// GPU context can be lost at runtime (e.g. driver reset) — drop back to DOM.
		this._webglAddon.onContextLoss(() => {
			this._logService.info(`Webgl lost context, disposing of webgl renderer`);
			this._disposeOfWebglRenderer();
			this._safeSetOption('rendererType', 'dom');
		});
		this._storageService.store(SUGGESTED_RENDERER_TYPE, 'auto', StorageScope.GLOBAL, StorageTarget.MACHINE);
	} catch (e) {
		this._logService.warn(`Webgl could not be loaded. Falling back to the canvas renderer type.`, e);
		const neverMeasureRenderTime = this._storageService.getBoolean(NEVER_MEASURE_RENDER_TIME_STORAGE_KEY, StorageScope.GLOBAL, false);
		// if it's already set to dom, no need to measure render time
		if (!neverMeasureRenderTime && this._configHelper.config.gpuAcceleration !== 'off') {
			this._measureRenderTime();
		}
		this._safeSetOption('rendererType', 'canvas');
		this._storageService.store(SUGGESTED_RENDERER_TYPE, 'canvas', StorageScope.GLOBAL, StorageTarget.MACHINE);
		this._disposeOfWebglRenderer();
	}
}

// Dispose may throw if the addon never fully initialized — swallow that.
private _disposeOfWebglRenderer(): void {
	try {
		this._webglAddon?.dispose();
	} catch {
		// ignore
	}
	this._webglAddon = undefined;
}

// Lazily loads the unicode 11 addon on first use, then activates the configured version.
private async _updateUnicodeVersion(): Promise<void> {
	if (!this._xterm) {
		throw new Error('Cannot update unicode version before xterm has been initialized');
	}
	if (!this._xtermUnicode11 && this._configHelper.config.unicodeVersion === '11') {
		const Addon = await this._terminalInstanceService.getXtermUnicode11Constructor();
		this._xtermUnicode11 = new Addon();
		this._xterm.loadAddon(this._xtermUnicode11);
	}
	this._xterm.unicode.activeVersion = this._configHelper.config.unicodeVersion;
}

// Toggles the navigation-mode addon and xterm's screen reader mode to match the
// platform screen-reader-optimized state.
updateAccessibilitySupport(): void {
	const isEnabled =
this._accessibilityService.isScreenReaderOptimized();
	if (isEnabled) {
		this._navigationModeAddon = new NavigationModeAddon(this._terminalA11yTreeFocusContextKey);
		this._xterm!.loadAddon(this._navigationModeAddon);
	} else {
		this._navigationModeAddon?.dispose();
		this._navigationModeAddon = undefined;
	}
	this._xterm!.setOption('screenReaderMode', isEnabled);
}

// Updates cursor blink, forcing a full refresh so the change is visible immediately.
private _setCursorBlink(blink: boolean): void {
	if (this._xterm && this._xterm.getOption('cursorBlink') !== blink) {
		this._xterm.setOption('cursorBlink', blink);
		this._xterm.refresh(0, this._xterm.rows - 1);
	}
}

private _setCursorStyle(style: string): void {
	if (this._xterm && this._xterm.getOption('cursorStyle') !== style) {
		// 'line' is used instead of bar in VS Code to be consistent with editor.cursorStyle
		const xtermOption = style === 'line' ? 'bar' : style;
		this._xterm.setOption('cursorStyle', xtermOption);
	}
}

private _setCursorWidth(width: number): void {
	if (this._xterm && this._xterm.getOption('cursorWidth') !== width) {
		this._xterm.setOption('cursorWidth', width);
	}
}

// Computes the commands whose keybindings bypass the shell: the defaults minus any the
// user excluded with a '-' prefix, plus the user's own additions.
// NOTE(review): '-'-prefixed entries are also concat'd verbatim into the result —
// presumably harmless since they never match a real command id, but confirm.
private _setCommandsToSkipShell(commands: string[]): void {
	const excludeCommands = commands.filter(command => command[0] === '-').map(command => command.slice(1));
	this._skipTerminalCommands = DEFAULT_COMMANDS_TO_SKIP_SHELL.filter(defaultCommand => {
		return excludeCommands.indexOf(defaultCommand) === -1;
	}).concat(commands);
}

// Sets an xterm option only when it actually changed, and only once xterm exists.
private _safeSetOption(key: string, value: any): void {
	if (!this._xterm) {
		return;
	}

	if (this._xterm.getOption(key) !== value) {
		this._xterm.setOption(key, value);
	}
}

// Lays the terminal out for the given dimensions, recomputing cols/rows and kicking off
// a (debounced) resize of xterm and the backing process.
layout(dimension: dom.Dimension): void {
	this._lastLayoutDimensions = dimension;
	if (this.disableLayout) {
		return;
	}

	// Don't layout if dimensions are invalid (eg. the container is not attached to the DOM or
	// if display: none
	if (dimension.width <= 0 || dimension.height <= 0) {
		return;
	}

	const terminalWidth = this._evaluateColsAndRows(dimension.width, dimension.height);
	if (!terminalWidth) {
		return;
	}

	if (this._xterm && this._xterm.element) {
		this._xterm.element.style.width = terminalWidth + 'px';
	}

	this._resize();

	// Signal the container is ready
	this._containerReadyBarrier.open();
}

// Debounced wrapper; the underlying resize is fired without awaiting.
@debounce(50)
private async _resize(): Promise<void> {
	this._resizeNow(false);
}

// Applies font/size settings, resizes xterm, and propagates the new dimensions to the
// process. @param immediate when true, setDimensions is invoked without awaiting.
private async _resizeNow(immediate: boolean): Promise<void> {
	let cols = this.cols;
	let rows = this.rows;

	if (this._xterm && this._xtermCore) {
		// Only apply these settings when the terminal is visible so that
		// the characters are measured correctly.
		if (this._isVisible) {
			const font = this._configHelper.getFont(this._xtermCore);
			const config = this._configHelper.config;
			this._safeSetOption('letterSpacing', font.letterSpacing);
			this._safeSetOption('lineHeight', font.lineHeight);
			this._safeSetOption('fontSize', font.fontSize);
			this._safeSetOption('fontFamily', font.fontFamily);
			this._safeSetOption('fontWeight', config.fontWeight);
			this._safeSetOption('fontWeightBold', config.fontWeightBold);
			this._safeSetOption('drawBoldTextInBrightColors', config.drawBoldTextInBrightColors);

			// Any of the above setting changes could have changed the dimensions of the
			// terminal, re-evaluate now.
			this._initDimensions();
			cols = this.cols;
			rows = this.rows;
		}

		if (isNaN(cols) || isNaN(rows)) {
			return;
		}

		if (cols !== this._xterm.cols || rows !== this._xterm.rows) {
			this._onDimensionsChanged.fire();
		}

		this._xterm.resize(cols, rows);
		TerminalInstance._lastKnownGridDimensions = { cols, rows };

		if (this._isVisible) {
			// HACK: Force the renderer to unpause by simulating an IntersectionObserver event.
			// This is to fix an issue where dragging the window to the top of the screen to
			// maximize on Windows/Linux would fire an event saying that the terminal was not
			// visible.
			if (this._xterm.getOption('rendererType') === 'canvas') {
				this._xtermCore._renderService?._onIntersectionChange({ intersectionRatio: 1 });
				// HACK: Force a refresh of the screen to ensure links are refresh corrected.
				// This can probably be removed when the above hack is fixed in Chromium.
				this._xterm.refresh(0, this._xterm.rows - 1);
			}
		}
	}

	if (immediate) {
		// do not await, call setDimensions synchronously
		this._processManager.setDimensions(cols, rows, true);
	} else {
		await this._processManager.setDimensions(cols, rows);
	}
}

setShellType(shellType: TerminalShellType) {
	this._shellType = shellType;
}

// Keeps the xterm textarea's aria-label ("Terminal N" or "Terminal N, title") current.
private _setAriaLabel(xterm: XTermTerminal | undefined, terminalId: number, title: string | undefined): void {
	if (xterm) {
		if (title && title.length > 0) {
			xterm.textarea?.setAttribute('aria-label', nls.localize('terminalTextBoxAriaLabelNumberAndTitle', "Terminal {0}, {1}", terminalId, title));
		} else {
			xterm.textarea?.setAttribute('aria-label', nls.localize('terminalTextBoxAriaLabel', "Terminal {0}", terminalId));
		}
	}
}

// Sets the terminal title, normalizing it per event source (process path → basename,
// API → takes over from automatic updates, sequence → trims Windows paths), then fires
// change events and resolves waitForTitle if pending.
setTitle(title: string | undefined, eventSource: TitleEventSource): void {
	if (!title) {
		return;
	}
	switch (eventSource) {
		case TitleEventSource.Process:
			if (this._processManager.os === OperatingSystem.Windows) {
				// Extract the file name without extension
				title = path.win32.parse(title).name;
			} else {
				const firstSpaceIndex = title.indexOf(' ');
				if (title.startsWith('/')) {
					title = path.basename(title);
				} else if (firstSpaceIndex > -1) {
					title = title.substring(0, firstSpaceIndex);
				}
			}
			break;
		case TitleEventSource.Api:
			// If the title has not been set by the API or the rename command, unregister the handler that
			// automatically updates the terminal name
			dispose(this._messageTitleDisposable);
			this._messageTitleDisposable = undefined;
			break;
		case TitleEventSource.Sequence:
			// On Windows, some shells will fire this with the full path which we want to trim
			// to show just the file name. This should only happen if the title looks like an
			// absolute Windows file path
			if (this._processManager.os === OperatingSystem.Windows && title.match(/^[a-zA-Z]:\\.+\.[a-zA-Z]{1,3}/)) {
				title = path.win32.parse(title).name;
			}
			break;
	}
	const didTitleChange = title !== this._title;
	this._title = title;
	this._titleSource = eventSource;
	if (didTitleChange) {
		this._setAriaLabel(this._xterm, this._instanceId, this._title);
		if (this._titleReadyComplete) {
			this._titleReadyComplete(title);
			this._titleReadyComplete = undefined;
		}
		this._onTitleChanged.fire(this);
	}
}

// Resolves once the terminal gets its first title (see _titleReadyComplete above).
waitForTitle(): Promise<string> {
	return this._titleReadyPromise;
}

// Applies a dimensions override from the process (e.g. fixed-size task terminals) and
// triggers a resize; `immediate` bypasses the resize debounce.
setDimensions(dimensions: ITerminalDimensionsOverride | undefined, immediate: boolean = false): void {
	if (this._dimensionsOverride && this._dimensionsOverride.forceExactSize && !dimensions && this._rows === 0 && this._cols === 0) {
		// this terminal never had a real size => keep the last dimensions override exact size
		this._cols = this._dimensionsOverride.cols;
		this._rows = this._dimensionsOverride.rows;
	}
	this._dimensionsOverride = dimensions;
	if (immediate) {
		this._resizeNow(true);
	} else {
		this._resize();
	}
}

// Copies the fields resolved by the process manager back onto the launch config so the
// UI reflects what actually launched.
private _setResolvedShellLaunchConfig(shellLaunchConfig: IShellLaunchConfig): void {
	this._shellLaunchConfig.args = shellLaunchConfig.args;
	this._shellLaunchConfig.cwd = shellLaunchConfig.cwd;
	this._shellLaunchConfig.executable = shellLaunchConfig.executable;
	this._shellLaunchConfig.env = shellLaunchConfig.env;
}

showEnvironmentInfoHover(): void {
	if (this._environmentInfo) {
		this._environmentInfo.widget.focus();
	}
}

// Surfaces stale-environment state to screen readers and refreshes the info widget.
private _onEnvironmentVariableInfoChanged(info: IEnvironmentVariableInfo): void {
	if (info.requiresAction) {
		this._xterm?.textarea?.setAttribute('aria-label', nls.localize('terminalStaleTextBoxAriaLabel', "Terminal {0} environment is stale, run the 'Show Environment Information' command for more information", this._instanceId));
	}
	this._refreshEnvironmentVariableInfoWidgetState(info);
}

private
_refreshEnvironmentVariableInfoWidgetState(info?: IEnvironmentVariableInfo): void { // Check if the widget should not exist if ( !info || this._configHelper.config.environmentChangesIndicator === 'off' || this._configHelper.config.environmentChangesIndicator === 'warnonly' && !info.requiresAction ) { this.statusList.remove(TerminalStatus.RelaunchNeeded); this._environmentInfo?.disposable.dispose(); this._environmentInfo = undefined; return; } // Recreate the process if the terminal has not yet been interacted with and it's not a // special terminal (eg. task, extension terminal) if ( info.requiresAction && this._configHelper.config.environmentChangesRelaunch && !this._processManager.hasWrittenData && !this._shellLaunchConfig.isFeatureTerminal && !this._shellLaunchConfig.customPtyImplementation && !this._shellLaunchConfig.isExtensionOwnedTerminal && !this._shellLaunchConfig.attachPersistentProcess ) { this.relaunch(); return; } // (Re-)create the widget this._environmentInfo?.disposable.dispose(); const widget = this._instantiationService.createInstance(EnvironmentVariableInfoWidget, info); const disposable = this._widgetManager.attachWidget(widget); if (info.requiresAction) { this.statusList.add({ id: TerminalStatus.RelaunchNeeded, severity: Severity.Warning, icon: Codicon.warning, tooltip: info.getInfo(), hoverActions: info.getActions ? info.getActions() : undefined }); } if (disposable) { this._environmentInfo = { widget, disposable }; } } private _getXtermTheme(theme?: IColorTheme): ITheme { if (!theme) { theme = this._themeService.getColorTheme(); } const location = this._viewDescriptorService.getViewLocationById(TERMINAL_VIEW_ID)!; const foregroundColor = theme.getColor(TERMINAL_FOREGROUND_COLOR); const backgroundColor = theme.getColor(TERMINAL_BACKGROUND_COLOR) || (location === ViewContainerLocation.Sidebar ? 
theme.getColor(SIDE_BAR_BACKGROUND) : theme.getColor(PANEL_BACKGROUND)); const cursorColor = theme.getColor(TERMINAL_CURSOR_FOREGROUND_COLOR) || foregroundColor; const cursorAccentColor = theme.getColor(TERMINAL_CURSOR_BACKGROUND_COLOR) || backgroundColor; const selectionColor = theme.getColor(TERMINAL_SELECTION_BACKGROUND_COLOR); return { background: backgroundColor ? backgroundColor.toString() : undefined, foreground: foregroundColor ? foregroundColor.toString() : undefined, cursor: cursorColor ? cursorColor.toString() : undefined, cursorAccent: cursorAccentColor ? cursorAccentColor.toString() : undefined, selection: selectionColor ? selectionColor.toString() : undefined, black: theme.getColor(ansiColorIdentifiers[0])!.toString(), red: theme.getColor(ansiColorIdentifiers[1])!.toString(), green: theme.getColor(ansiColorIdentifiers[2])!.toString(), yellow: theme.getColor(ansiColorIdentifiers[3])!.toString(), blue: theme.getColor(ansiColorIdentifiers[4])!.toString(), magenta: theme.getColor(ansiColorIdentifiers[5])!.toString(), cyan: theme.getColor(ansiColorIdentifiers[6])!.toString(), white: theme.getColor(ansiColorIdentifiers[7])!.toString(), brightBlack: theme.getColor(ansiColorIdentifiers[8])!.toString(), brightRed: theme.getColor(ansiColorIdentifiers[9])!.toString(), brightGreen: theme.getColor(ansiColorIdentifiers[10])!.toString(), brightYellow: theme.getColor(ansiColorIdentifiers[11])!.toString(), brightBlue: theme.getColor(ansiColorIdentifiers[12])!.toString(), brightMagenta: theme.getColor(ansiColorIdentifiers[13])!.toString(), brightCyan: theme.getColor(ansiColorIdentifiers[14])!.toString(), brightWhite: theme.getColor(ansiColorIdentifiers[15])!.toString() }; } private _updateTheme(xterm: XTermTerminal, theme?: IColorTheme): void { xterm.setOption('theme', this._getXtermTheme(theme)); } async toggleEscapeSequenceLogging(): Promise<void> { const xterm = await this._xtermReadyPromise; const isDebug = xterm.getOption('logLevel') === 'debug'; 
xterm.setOption('logLevel', isDebug ? 'info' : 'debug'); } getInitialCwd(): Promise<string> { return this._processManager.getInitialCwd(); } getCwd(): Promise<string> { return this._processManager.getCwd(); } registerLinkProvider(provider: ITerminalExternalLinkProvider): IDisposable { if (!this._linkManager) { throw new Error('TerminalInstance.registerLinkProvider before link manager was ready'); } return this._linkManager.registerExternalLinkProvider(this, provider); } async rename() { const name = await this._quickInputService.input({ value: this.title, prompt: nls.localize('workbench.action.terminal.rename.prompt', "Enter terminal name"), }); if (name) { this.setTitle(name, TitleEventSource.Api); } } async changeIcon() { const items: IQuickPickItem[] = []; for (const icon of iconRegistry.all) { items.push({ label: `$(${icon.id})`, description: `${icon.id}` }); } const result = await this._quickInputService.pick(items, { title: nls.localize('changeTerminalIcon', "Change Icon"), matchOnDescription: true }); if (result && result.description) { this.shellLaunchConfig.icon = iconRegistry.get(result.description); this._onIconChanged.fire(this); } } async changeColor() { const icon = this._getIcon(); if (!icon) { return; } const items: IQuickPickItem[] = []; for (const color of colors) { items.push({ label: `$(${Codicon.circleFilled.id}) ${color.replace('terminal.ansi', '')}`, id: `${color.replace(/\./g, '_')}`, description: `${color}`, iconClasses: [`terminal-icon-${color.replace(/\./g, '_')}`] }); } const result = await this._quickInputService.pick(items, { title: nls.localize('changeTerminalColor', "Change Color"), matchOnDescription: true }); if (result) { this.shellLaunchConfig.color = result.id; this._onIconChanged.fire(this); } } } class
extends Disposable implements IDragAndDropObserverCallbacks { private _dropOverlay?: HTMLElement; private readonly _onDropFile = new Emitter<string>(); get onDropFile(): Event<string> { return this._onDropFile.event; } private readonly _onDropTerminal = new Emitter<IRequestAddInstanceToGroupEvent>(); get onDropTerminal(): Event<IRequestAddInstanceToGroupEvent> { return this._onDropTerminal.event; } constructor( private readonly _container: HTMLElement ) { super(); this._register(toDisposable(() => this._clearDropOverlay())); } private _clearDropOverlay() { if (this._dropOverlay && this._dropOverlay.parentElement) { this._dropOverlay.parentElement.removeChild(this._dropOverlay); } this._dropOverlay = undefined; } onDragEnter(e: DragEvent) { if (!this._dropOverlay) { this._dropOverlay = document.createElement('div'); this._dropOverlay.classList.add('terminal-drop-overlay'); } const types = e.dataTransfer?.types || []; // Dragging terminals if (types.includes('terminals')) { const side = this._getDropSide(e); this._dropOverlay.classList.toggle('drop-left', side === 'left'); this._dropOverlay.classList.toggle('drop-right', side === 'right'); } if (!this._dropOverlay.parentElement) { this._container.appendChild(this._dropOverlay); } } onDragLeave(e: DragEvent) { this._clearDropOverlay(); } onDragEnd(e: DragEvent) { this._clearDropOverlay(); } onDragOver(e: DragEvent) { if (!e.dataTransfer || !this._dropOverlay) { return; } const types = e.dataTransfer?.types || []; // Dragging terminals if (types.includes('terminals')) { const side = this._getDropSide(e); this._dropOverlay.classList.toggle('drop-left', side === 'left'); this._dropOverlay.classList.toggle('drop-right', side === 'right'); } this._dropOverlay.style.opacity = '1'; } async onDrop(e: DragEvent) { this._clearDropOverlay(); if (!e.dataTransfer) { return; } // Check if files were dragged from the tree explorer let path: string | undefined; const resources = e.dataTransfer.getData(DataTransfers.RESOURCES); if 
(resources) { const uri = URI.parse(JSON.parse(resources)[0]); if (uri.scheme === Schemas.vscodeTerminal) { this._onDropTerminal.fire({ uri, side: this._getDropSide(e) }); return; } else { path = uri.fsPath; } } else if (e.dataTransfer.files?.[0].path /* Electron only */) { // Check if the file was dragged from the filesystem path = URI.file(e.dataTransfer.files[0].path).fsPath; } if (!path) { return; } this._onDropFile.fire(path); } private _getDropSide(e: DragEvent): 'left' | 'right' { const target = this._container; if (!target) { return 'right'; } const rect = target.getBoundingClientRect(); return e.clientX - rect.left < rect.width / 2 ? 'left' : 'right'; } } let colors: string[] = []; registerThemingParticipant((theme: IColorTheme, collector: ICssStyleCollector) => { // add icon colors colors = []; for (const colorKey in ansiColorMap) { const color = theme.getColor(colorKey); if (color && !colorKey.toLowerCase().includes('bright')) { colors.push(colorKey); // exclude status icons (file-icon) and inline action icons (trashcan and horizontalSplit) collector.addRule(`.monaco-workbench .terminal-icon-${colorKey.replace(/\./g, '_')} .codicon:not(.codicon-split-horizontal):not(.codicon-trashcan):not(.file-icon) { color: ${color} !important; }`); } } // Border const border = theme.getColor(activeContrastBorder); if (border) { collector.addRule(` .monaco-workbench.hc-black .pane-body.integrated-terminal .xterm.focus::before, .monaco-workbench.hc-black .pane-body.integrated-terminal .xterm:focus::before { border-color: ${border}; }` ); } // Scrollbar const scrollbarSliderBackgroundColor = theme.getColor(scrollbarSliderBackground); if (scrollbarSliderBackgroundColor) { collector.addRule(` .monaco-workbench .pane-body.integrated-terminal .find-focused .xterm .xterm-viewport, .monaco-workbench .pane-body.integrated-terminal .xterm.focus .xterm-viewport, .monaco-workbench .pane-body.integrated-terminal .xterm:focus .xterm-viewport, .monaco-workbench 
.pane-body.integrated-terminal .xterm:hover .xterm-viewport { background-color: ${scrollbarSliderBackgroundColor} !important; } .monaco-workbench .pane-body.integrated-terminal .xterm-viewport { scrollbar-color: ${scrollbarSliderBackgroundColor} transparent; } `); } const scrollbarSliderHoverBackgroundColor = theme.getColor(scrollbarSliderHoverBackground); if (scrollbarSliderHoverBackgroundColor) { collector.addRule(` .monaco-workbench .pane-body.integrated-terminal .xterm .xterm-viewport::-webkit-scrollbar-thumb:hover { background-color: ${scrollbarSliderHoverBackgroundColor}; } .monaco-workbench .pane-body.integrated-terminal .xterm-viewport:hover { scrollbar-color: ${scrollbarSliderHoverBackgroundColor} transparent; } `); } const scrollbarSliderActiveBackgroundColor = theme.getColor(scrollbarSliderActiveBackground); if (scrollbarSliderActiveBackgroundColor) { collector.addRule(`.monaco-workbench .pane-body.integrated-terminal .xterm .xterm-viewport::-webkit-scrollbar-thumb:active { background-color: ${scrollbarSliderActiveBackgroundColor}; }`); } });
TerminalInstanceDropAndDropController
model_list_scaling_policies_request.go
package model import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3/core/utils" "errors" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/huaweicloud/huaweicloud-sdk-go-v3/core/converter" "strings" ) // Request Object type ListScalingPoliciesRequest struct { // 伸缩组ID。 ScalingGroupId string `json:"scaling_group_id"` // 伸缩策略名称。 ScalingPolicyName *string `json:"scaling_policy_name,omitempty"` // 策略类型。 ScalingPolicyType *ListScalingPoliciesRequestScalingPolicyType `json:"scaling_policy_type,omitempty"` // 伸缩策略ID。 ScalingPolicyId *string `json:"scaling_policy_id,omitempty"` // 查询的起始行号,默认为0。 StartNumber *int32 `json:"start_number,omitempty"` // 查询记录数,默认20,最大100。 Limit *int32 `json:"limit,omitempty"` } func (o ListScalingPoliciesRequest) String() string { data, err := utils.Marshal(o) if err != nil { return "ListScalingPoliciesRequest struct{}" } return strings.Join([]string{"ListScalingPoliciesRequest", string(data)}, " ") } type ListScalingPoliciesRequestScalingPolicyType struct { value string
type ListScalingPoliciesRequestScalingPolicyTypeEnum struct { ALARM ListScalingPoliciesRequestScalingPolicyType SCHEDULED ListScalingPoliciesRequestScalingPolicyType RECURRENCE ListScalingPoliciesRequestScalingPolicyType } func GetListScalingPoliciesRequestScalingPolicyTypeEnum() ListScalingPoliciesRequestScalingPolicyTypeEnum { return ListScalingPoliciesRequestScalingPolicyTypeEnum{ ALARM: ListScalingPoliciesRequestScalingPolicyType{ value: "ALARM", }, SCHEDULED: ListScalingPoliciesRequestScalingPolicyType{ value: "SCHEDULED", }, RECURRENCE: ListScalingPoliciesRequestScalingPolicyType{ value: "RECURRENCE", }, } } func (c ListScalingPoliciesRequestScalingPolicyType) MarshalJSON() ([]byte, error) { return utils.Marshal(c.value) } func (c *ListScalingPoliciesRequestScalingPolicyType) UnmarshalJSON(b []byte) error { myConverter := converter.StringConverterFactory("string") if myConverter != nil { val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\"")) if err == nil { c.value = val.(string) return nil } return err } else { return errors.New("convert enum data to string error") } }
}
access.go
// Copyright 2014 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package models import ( "fmt" "gogs/models/errors" log "gopkg.in/clog.v1" ) type AccessMode int const ( ACCESS_MODE_NONE AccessMode = iota // 0 ACCESS_MODE_READ // 1 ACCESS_MODE_WRITE // 2 ACCESS_MODE_ADMIN // 3 ACCESS_MODE_OWNER // 4 ) func (mode AccessMode) String() string { switch mode { case ACCESS_MODE_READ: return "read" case ACCESS_MODE_WRITE: return "write" case ACCESS_MODE_ADMIN: return "admin" case ACCESS_MODE_OWNER: return "owner" default: return "none" } } // ParseAccessMode returns corresponding access mode to given permission string. func ParseAccessMode(permission string) AccessMode { switch permission { case "write": return ACCESS_MODE_WRITE case "admin": return ACCESS_MODE_ADMIN default: return ACCESS_MODE_READ } } // Access represents the highest access level of a user to the repository. The only access type // that is not in this table is the real owner of a repository. In case of an organization // repository, the members of the owners team are in this table. type Access struct { ID int64 UserID int64 `xorm:"UNIQUE(s)"` RepoID int64 `xorm:"UNIQUE(s)"` Mode AccessMode } func accessLevel(e Engine, userID int64, repo *Repository) (AccessMode, error) { mode := ACCESS_MODE_NONE // Everyone has read access to public repository if !repo.IsPrivate { mode = ACCESS_MODE_READ } if userID <= 0 { return mode, nil } if userID == repo.OwnerID { return ACCESS_MODE_OWNER, nil } access := &Access{ UserID: userID, RepoID: repo.ID, } if has, err := e.Get(access); !has || err != nil { return mode, err } return access.Mode, nil } // AccessLevel returns the Access a user has to a repository. Will return NoneAccess if the // user does not have access. func AccessLevel(userID int64, repo *Repository) (AccessMode, error) { return accessLevel(x, userID, repo) } func
(e Engine, userID int64, repo *Repository, testMode AccessMode) (bool, error) { mode, err := accessLevel(e, userID, repo) return mode >= testMode, err } // HasAccess returns true if someone has the request access level. User can be nil! func HasAccess(userID int64, repo *Repository, testMode AccessMode) (bool, error) { return hasAccess(x, userID, repo, testMode) } // GetRepositoryAccesses finds all repositories with their access mode where a user has access but does not own. func (u *User) GetRepositoryAccesses() (map[*Repository]AccessMode, error) { accesses := make([]*Access, 0, 10) if err := x.Find(&accesses, &Access{UserID: u.ID}); err != nil { return nil, err } repos := make(map[*Repository]AccessMode, len(accesses)) for _, access := range accesses { repo, err := GetRepositoryByID(access.RepoID) if err != nil { if errors.IsRepoNotExist(err) { log.Error(2, "GetRepositoryByID: %v", err) continue } return nil, err } if repo.OwnerID == u.ID { continue } repos[repo] = access.Mode } return repos, nil } // GetAccessibleRepositories finds repositories which the user has access but does not own. // If limit is smaller than 1 means returns all found results. func (user *User) GetAccessibleRepositories(limit int) (repos []*Repository, _ error) { sess := x.Where("owner_id !=? ", user.ID).Desc("updated_unix") if limit > 0 { sess.Limit(limit) repos = make([]*Repository, 0, limit) } else { repos = make([]*Repository, 0, 10) } return repos, sess.Join("INNER", "access", "access.user_id = ? AND access.repo_id = repository.id", user.ID).Find(&repos) } func maxAccessMode(modes ...AccessMode) AccessMode { max := ACCESS_MODE_NONE for _, mode := range modes { if mode > max { max = mode } } return max } // FIXME: do corss-comparison so reduce deletions and additions to the minimum? 
func (repo *Repository) refreshAccesses(e Engine, accessMap map[int64]AccessMode) (err error) { newAccesses := make([]Access, 0, len(accessMap)) for userID, mode := range accessMap { newAccesses = append(newAccesses, Access{ UserID: userID, RepoID: repo.ID, Mode: mode, }) } // Delete old accesses and insert new ones for repository. if _, err = e.Delete(&Access{RepoID: repo.ID}); err != nil { return fmt.Errorf("delete old accesses: %v", err) } else if _, err = e.Insert(newAccesses); err != nil { return fmt.Errorf("insert new accesses: %v", err) } return nil } // refreshCollaboratorAccesses retrieves repository collaborations with their access modes. func (repo *Repository) refreshCollaboratorAccesses(e Engine, accessMap map[int64]AccessMode) error { collaborations, err := repo.getCollaborations(e) if err != nil { return fmt.Errorf("getCollaborations: %v", err) } for _, c := range collaborations { accessMap[c.UserID] = c.Mode } return nil } // recalculateTeamAccesses recalculates new accesses for teams of an organization // except the team whose ID is given. It is used to assign a team ID when // remove repository from that team. func (repo *Repository) recalculateTeamAccesses(e Engine, ignTeamID int64) (err error) { accessMap := make(map[int64]AccessMode, 20) if err = repo.getOwner(e); err != nil { return err } else if !repo.Owner.IsOrganization() { return fmt.Errorf("owner is not an organization: %d", repo.OwnerID) } if err = repo.refreshCollaboratorAccesses(e, accessMap); err != nil { return fmt.Errorf("refreshCollaboratorAccesses: %v", err) } if err = repo.Owner.getTeams(e); err != nil { return err } for _, t := range repo.Owner.Teams { if t.ID == ignTeamID { continue } // Owner team gets owner access, and skip for teams that do not // have relations with repository. 
if t.IsOwnerTeam() { t.Authorize = ACCESS_MODE_OWNER } else if !t.hasRepository(e, repo.ID) { continue } if err = t.getMembers(e); err != nil { return fmt.Errorf("getMembers '%d': %v", t.ID, err) } for _, m := range t.Members { accessMap[m.ID] = maxAccessMode(accessMap[m.ID], t.Authorize) } } return repo.refreshAccesses(e, accessMap) } func (repo *Repository) recalculateAccesses(e Engine) error { if repo.Owner.IsOrganization() { return repo.recalculateTeamAccesses(e, 0) } accessMap := make(map[int64]AccessMode, 10) if err := repo.refreshCollaboratorAccesses(e, accessMap); err != nil { return fmt.Errorf("refreshCollaboratorAccesses: %v", err) } return repo.refreshAccesses(e, accessMap) } // RecalculateAccesses recalculates all accesses for repository. func (repo *Repository) RecalculateAccesses() error { return repo.recalculateAccesses(x) }
hasAccess
lib.rs
#![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. #![recursion_limit = "256"] // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. pub use frame_support::{ construct_runtime, parameter_types, traits::{KeyOwnerProofSystem, Randomness, StorageInfo}, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, IdentityFee, Weight, }, StorageValue, }; pub use pallet_balances::Call as BalancesCall; pub use pallet_timestamp::Call as TimestampCall; use pallet_transaction_payment::CurrencyAdapter; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; /// Import the template pallet. pub use pallet_template; pub use pallet_dvine; /// An index to a block. pub type BlockNumber = u32; /// Alias to 512-bit hash when used in the context of a transaction signature on the chain. pub type Signature = MultiSignature; /// Some way of identifying an account on the chain. We intentionally make it equivalent /// to the public key of our transaction signing scheme. pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; /// Balance of an account. 
pub type Balance = u128; /// Index of a transaction in the chain. pub type Index = u32; /// A hash of some data used by the chain. pub type Hash = sp_core::H256; /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades /// to even the core data structures. pub mod opaque { use super::*; pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; /// Opaque block header type. pub type Header = generic::Header<BlockNumber, BlakeTwo256>; /// Opaque block type. pub type Block = generic::Block<Header, UncheckedExtrinsic>; /// Opaque block identifier type. pub type BlockId = generic::BlockId<Block>; impl_opaque_keys! { pub struct SessionKeys { pub aura: Aura, pub grandpa: Grandpa, } } } // To learn more about runtime versioning and what each of the following value means: // https://docs.substrate.io/v3/runtime/upgrades#runtime-versioning #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node-template"), impl_name: create_runtime_str!("node-template"), authoring_version: 1, // The version of the runtime specification. A full node will not attempt to use its native // runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. spec_version: 100, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, }; /// This determines the average expected block time that we are targeting. /// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. 
/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked /// up by `pallet_aura` to implement `fn slot_duration()`. /// /// Change this to adjust the block time. pub const MILLISECS_PER_BLOCK: u64 = 6000; // NOTE: Currently it is not possible to change the slot duration after the chain has started. // Attempting to do so will brick block production. pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; // Time is measured by number of blocks. pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); pub const HOURS: BlockNumber = MINUTES * 60; pub const DAYS: BlockNumber = HOURS * 24; /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! { pub const Version: RuntimeVersion = VERSION; pub const BlockHashCount: BlockNumber = 2400; /// We allow for 2 seconds of compute with a 6 second average block time. pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 42; } // Configure FRAME pallets to include in runtime. impl frame_system::Config for Runtime { /// The basic call filter to use in dispatchable. type BaseCallFilter = frame_support::traits::Everything; /// Block & extrinsics weights: base values and limits. type BlockWeights = BlockWeights; /// The maximum length of a block (in bytes). type BlockLength = BlockLength; /// The identifier used to distinguish between accounts. type AccountId = AccountId; /// The aggregated dispatch type that is available for extrinsics. 
type Call = Call; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = AccountIdLookup<AccountId, ()>; /// The index type for storing how many extrinsics an account has signed. type Index = Index; /// The index type for blocks. type BlockNumber = BlockNumber; /// The type for hashing blocks and tries. type Hash = Hash; /// The hashing algorithm used. type Hashing = BlakeTwo256; /// The header type. type Header = generic::Header<BlockNumber, BlakeTwo256>; /// The ubiquitous event type. type Event = Event; /// The ubiquitous origin type. type Origin = Origin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; /// Version of the runtime. type Version = Version; /// Converts a module to the index of the module in `construct_runtime!`. /// /// This type is being generated by `construct_runtime!`. type PalletInfo = PalletInfo; /// What to do if a new account is created. type OnNewAccount = (); /// What to do if an account is fully reaped from the system. type OnKilledAccount = (); /// The data to be stored in an account. type AccountData = pallet_balances::AccountData<Balance>; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); /// This is used as an identifier of the chain. 42 is the generic substrate prefix. type SS58Prefix = SS58Prefix; /// The set code logic, just the default since we're not a parachain. type OnSetCode = (); } impl pallet_randomness_collective_flip::Config for Runtime {} parameter_types! 
{ pub const MaxAuthorities: u32 = 32; } impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type DisabledValidators = (); type MaxAuthorities = MaxAuthorities; } impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; type KeyOwnerProofSystem = (); type KeyOwnerProof = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
KeyTypeId, GrandpaId, )>>::IdentificationTuple; type HandleEquivocation = (); type WeightInfo = (); type MaxAuthorities = MaxAuthorities; } parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } impl pallet_timestamp::Config for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u128 = 500; pub const MaxLocks: u32 = 50; } impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; type MaxReserves = (); type ReserveIdentifier = [u8; 8]; /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>; } parameter_types! { pub const TransactionByteFee: Balance = 1; pub OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter<Balances, ()>; type TransactionByteFee = TransactionByteFee; type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee<Balance>; type FeeMultiplierUpdate = (); } impl pallet_sudo::Config for Runtime { type Event = Event; type Call = Call; } /// Configure the pallet-template in pallets/template. impl pallet_template::Config for Runtime { type Event = Event; } impl pallet_dvine::Config for Runtime { type Event = Event; } // Create the runtime by composing the FRAME pallets that were previously configured. 
construct_runtime!( pub enum Runtime where Block = Block, NodeBlock = opaque::Block, UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system, RandomnessCollectiveFlip: pallet_randomness_collective_flip, Timestamp: pallet_timestamp, Aura: pallet_aura, Grandpa: pallet_grandpa, Balances: pallet_balances, TransactionPayment: pallet_transaction_payment, Sudo: pallet_sudo, // Include the custom logic from the pallet-template in the runtime. TemplateModule: pallet_template, Dvine: pallet_dvine, } ); /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress<AccountId, ()>; /// Block header type as expected by this runtime. pub type Header = generic::Header<BlockNumber, BlakeTwo256>; /// Block type as expected by this runtime. pub type Block = generic::Block<Header, UncheckedExtrinsic>; /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( frame_system::CheckSpecVersion<Runtime>, frame_system::CheckTxVersion<Runtime>, frame_system::CheckGenesis<Runtime>, frame_system::CheckEra<Runtime>, frame_system::CheckNonce<Runtime>, frame_system::CheckWeight<Runtime>, pallet_transaction_payment::ChargeTransactionPayment<Runtime>, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, Block, frame_system::ChainContext<Runtime>, Runtime, AllPalletsWithSystem, >; impl_runtime_apis! 
{ impl sp_api::Core<Block> for Runtime { fn version() -> RuntimeVersion { VERSION } fn execute_block(block: Block) { Executive::execute_block(block); } fn initialize_block(header: &<Block as BlockT>::Header) { Executive::initialize_block(header) } } impl sp_api::Metadata<Block> for Runtime { fn metadata() -> OpaqueMetadata { OpaqueMetadata::new(Runtime::metadata().into()) } } impl sp_block_builder::BlockBuilder<Block> for Runtime { fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult { Executive::apply_extrinsic(extrinsic) } fn finalize_block() -> <Block as BlockT>::Header { Executive::finalize_block() } fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> { data.create_extrinsics() } fn check_inherents( block: Block, data: sp_inherents::InherentData, ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime { fn validate_transaction( source: TransactionSource, tx: <Block as BlockT>::Extrinsic, block_hash: <Block as BlockT>::Hash, ) -> TransactionValidity { Executive::validate_transaction(source, tx, block_hash) } } impl sp_offchain::OffchainWorkerApi<Block> for Runtime { fn offchain_worker(header: &<Block as BlockT>::Header) { Executive::offchain_worker(header) } } impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) } fn authorities() -> Vec<AuraId> { Aura::authorities().into_inner() } } impl sp_session::SessionKeys<Block> for Runtime { fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> { opaque::SessionKeys::generate(seed) } fn decode_session_keys( encoded: Vec<u8>, ) -> Option<Vec<(Vec<u8>, KeyTypeId)>> { opaque::SessionKeys::decode_into_raw_public_keys(&encoded) } } impl fg_primitives::GrandpaApi<Block> for Runtime { fn grandpa_authorities() -> 
GrandpaAuthorityList { Grandpa::grandpa_authorities() } fn current_set_id() -> fg_primitives::SetId { Grandpa::current_set_id() } fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: fg_primitives::EquivocationProof< <Block as BlockT>::Hash, NumberFor<Block>, >, _key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, ) -> Option<()> { None } fn generate_key_ownership_proof( _set_id: fg_primitives::SetId, _authority_id: GrandpaId, ) -> Option<fg_primitives::OpaqueKeyOwnershipProof> { // NOTE: this is the only implementation possible since we've // defined our key owner proof type as a bottom type (i.e. a type // with no values). None } } impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Index> for Runtime { fn account_nonce(account: AccountId) -> Index { System::account_nonce(account) } } impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime { fn query_info( uxt: <Block as BlockT>::Extrinsic, len: u32, ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> { TransactionPayment::query_info(uxt, len) } fn query_fee_details( uxt: <Block as BlockT>::Extrinsic, len: u32, ) -> pallet_transaction_payment::FeeDetails<Balance> { TransactionPayment::query_fee_details(uxt, len) } } #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark<Block> for Runtime { fn benchmark_metadata(extra: bool) -> ( Vec<frame_benchmarking::BenchmarkList>, Vec<frame_support::traits::StorageInfo>, ) { use frame_benchmarking::{list_benchmark, baseline, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use baseline::Pallet as BaselineBench; let mut list = Vec::<BenchmarkList>::new(); list_benchmark!(list, extra, frame_benchmarking, BaselineBench::<Runtime>); list_benchmark!(list, extra, frame_system, SystemBench::<Runtime>); list_benchmark!(list, extra, pallet_balances, Balances); 
list_benchmark!(list, extra, pallet_timestamp, Timestamp); list_benchmark!(list, extra, pallet_template, TemplateModule); let storage_info = AllPalletsWithSystem::storage_info(); return (list, storage_info) } fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> { use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; use frame_system_benchmarking::Pallet as SystemBench; use baseline::Pallet as BaselineBench; impl frame_system_benchmarking::Config for Runtime {} impl baseline::Config for Runtime {} let whitelist: Vec<TrackedStorageKey> = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), // Total Issuance hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), // Execution Phase hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), // Event Count hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), // System Events hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), ]; let mut batches = Vec::<BenchmarkBatch>::new(); let params = (&config, &whitelist); add_benchmark!(params, batches, frame_benchmarking, BaselineBench::<Runtime>); add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>); add_benchmark!(params, batches, pallet_balances, Balances); add_benchmark!(params, batches, pallet_timestamp, Timestamp); add_benchmark!(params, batches, pallet_template, TemplateModule); Ok(batches) } } }
type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
dump_kg2_equivalencies.py
#!/bin/env python3 """ This script creates a TSV of node pairs linked by an 'equivalent_to'/'same_as' relationship in KG2. The TSV file is created in the same directory the script is run from. Example of rows in the output file: UMLS:C0027358 UMLS:C0014563 UMLS:C0878440 UMLS:C0014563 Usage: python dump_kg2_equivalencies.py """ import csv import os import sys import traceback from typing import List, Dict from neo4j import GraphDatabase sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../") # code directory from RTXConfiguration import RTXConfiguration def
(cypher_query: str, kg="KG2") -> List[Dict[str, any]]: # This function sends a cypher query to neo4j (either KG1 or KG2) and returns results rtxc = RTXConfiguration() if kg == "KG2": rtxc.live = "KG2" try: driver = GraphDatabase.driver(rtxc.neo4j_bolt, auth=(rtxc.neo4j_username, rtxc.neo4j_password)) with driver.session() as session: print(f"Sending cypher query to {kg} neo4j") query_results = session.run(cypher_query).data() print(f"Got {len(query_results)} results back from neo4j") driver.close() except Exception: tb = traceback.format_exc() error_type, error, _ = sys.exc_info() print(f"Encountered an error interacting with {kg} neo4j. {tb}") return [] else: return query_results def dump_kg2_equivalencies(): # This function creates a TSV file of node pairs linked by an 'equivalent_to' or 'same_as' relationship in KG2 cypher_query = f"match (n1)-[:equivalent_to|:same_as]->(n2) return distinct n1.id, n2.id" equivalent_node_pairs = _run_cypher_query(cypher_query) if equivalent_node_pairs: column_headers = equivalent_node_pairs[0].keys() file_name = "kg2_equivalencies.tsv" with open(file_name, "w+") as output_file: dict_writer = csv.DictWriter(output_file, column_headers, delimiter='\t') dict_writer.writeheader() dict_writer.writerows(equivalent_node_pairs) print(f"Successfully created file '{file_name}' containing results") else: print(f"Sorry, couldn't get equivalency data. No file created.") def main(): dump_kg2_equivalencies() if __name__ == "__main__": main()
_run_cypher_query
glob.py
""" Filename globbing utility. Mostly a copy of `glob` from Python 3.5. Changes include: * `yield from` and PEP3102 `*` removed. * Hidden files are not ignored. """ import os import re import fnmatch
"""Return a list of paths matching a pathname pattern. The pattern may contain simple shell-style wildcards a la fnmatch. However, unlike fnmatch, filenames starting with a dot are special cases that are not matched by '*' and '?' patterns. If recursive is true, the pattern '**' will match any files and zero or more directories and subdirectories. """ return list(iglob(pathname, recursive=recursive)) def iglob(pathname, recursive=False): """Return an iterator which yields the paths matching a pathname pattern. The pattern may contain simple shell-style wildcards a la fnmatch. However, unlike fnmatch, filenames starting with a dot are special cases that are not matched by '*' and '?' patterns. If recursive is true, the pattern '**' will match any files and zero or more directories and subdirectories. """ it = _iglob(pathname, recursive) if recursive and _isrecursive(pathname): s = next(it) # skip empty string assert not s return it def _iglob(pathname, recursive): dirname, basename = os.path.split(pathname) glob_in_dir = glob2 if recursive and _isrecursive(basename) else glob1 if not has_magic(pathname): if basename: if os.path.lexists(pathname): yield pathname else: # Patterns ending with a slash should match only directories if os.path.isdir(dirname): yield pathname return if not dirname: yield from glob_in_dir(dirname, basename) return # `os.path.split()` returns the argument itself as a dirname if it is a # drive or UNC path. Prevent an infinite recursion if a drive or UNC path # contains magic characters (i.e. r'\\?\C:'). if dirname != pathname and has_magic(dirname): dirs = _iglob(dirname, recursive) else: dirs = [dirname] if not has_magic(basename): glob_in_dir = glob0 for dirname in dirs: for name in glob_in_dir(dirname, basename): yield os.path.join(dirname, name) # These 2 helper functions non-recursively glob inside a literal directory. # They return a list of basenames. 
`glob1` accepts a pattern while `glob0` # takes a literal basename (so it only has to check for its existence). def glob1(dirname, pattern): if not dirname: if isinstance(pattern, bytes): dirname = os.curdir.encode("ASCII") else: dirname = os.curdir try: names = os.listdir(dirname) except OSError: return [] return fnmatch.filter(names, pattern) def glob0(dirname, basename): if not basename: # `os.path.split()` returns an empty basename for paths ending with a # directory separator. 'q*x/' should match only directories. if os.path.isdir(dirname): return [basename] else: if os.path.lexists(os.path.join(dirname, basename)): return [basename] return [] # This helper function recursively yields relative pathnames inside a literal # directory. def glob2(dirname, pattern): assert _isrecursive(pattern) yield pattern[:0] for x in _rlistdir(dirname): yield x # Recursively yields relative pathnames inside a literal directory. def _rlistdir(dirname): if not dirname: if isinstance(dirname, bytes): dirname = os.curdir.encode("ASCII") else: dirname = os.curdir try: names = os.listdir(dirname) except os.error: return for x in names: yield x path = os.path.join(dirname, x) if dirname else x for y in _rlistdir(path): yield os.path.join(x, y) magic_check = re.compile("([*?[])") magic_check_bytes = re.compile(b"([*?[])") def has_magic(s): if isinstance(s, bytes): match = magic_check_bytes.search(s) else: match = magic_check.search(s) return match is not None def _isrecursive(pattern): if isinstance(pattern, bytes): return pattern == b"**" else: return pattern == "**" def escape(pathname): """Escape all special characters.""" # Escaping is done by wrapping any of "*?[" between square brackets. # Metacharacters do not work in the drive part and shouldn't be escaped. drive, pathname = os.path.splitdrive(pathname) if isinstance(pathname, bytes): pathname = magic_check_bytes.sub(br"[\1]", pathname) else: pathname = magic_check.sub(r"[\1]", pathname) return drive + pathname
__all__ = ["glob", "iglob", "escape"] def glob(pathname, recursive=False):
AbstractContainer.js
import React, { Component } from 'react'; /** * Abstract (base) class for container of React Component children with a style. * * @extends Component */ export default class
extends Component { /** * AbstractContainer component's property types. * * @static */ static propTypes = { children: React.PropTypes.node, /** * The event handler/listener to be invoked when this AbstractContainer * is clicked on Web or pressed on React Native. If onClick is defined * and touchFeedback is undefined, touchFeedback is considered defined * as true. */ onClick: React.PropTypes.func, /** * The style (as in stylesheet) to be applied to this AbstractContainer. */ style: React.PropTypes.object, /** * True if this instance is to provide visual feedback when touched; * otherwise, false. If touchFeedback is undefined and onClick is * defined, touchFeedback is considered defined as true. */ touchFeedback: React.PropTypes.bool, /** * True if this AbstractContainer is to be visible or false if this * instance is to be hidden or not rendered at all. */ visible: React.PropTypes.bool } /** * Renders this AbstractContainer as a React Component of a specific type. * * @param {string|ReactClass} type - The type of the React Component which * is to be rendered. * @param {Object|undefined} props - The read-only React Component * properties, if any, to render. If undefined, the props of this instance * will be rendered. * @protected * @returns {ReactElement} */ _render(type, props) { const { children, /* eslint-disable no-unused-vars */ // The following properties are defined for the benefit of // AbstractContainer and its extenders so they are to not be // propagated. touchFeedback, visible, /* eslint-enable no-unused-vars */ ...filteredProps } = props || this.props; return React.createElement(type, filteredProps, children); } }
AbstractContainer
cron_task.go
package cron import ( "time" "os" "os/signal" "github.com/irisnet/rainbow-sync/service/iris/logger" "github.com/irisnet/rainbow-sync/service/iris/db" model "github.com/irisnet/rainbow-sync/service/iris/model" "github.com/irisnet/rainbow-sync/service/iris/block" "github.com/irisnet/rainbow-sync/service/iris/helper" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" "fmt" ) type CronService struct{} func (s *CronService) StartCronService() { fn := func() { logger.Debug("Start CronService ...") ticker := time.NewTicker(5 * time.Minute) defer ticker.Stop() stop := make(chan os.Signal) signal.Notify(stop, os.Interrupt) fn_update := func() { defer func() { if r := recover(); r != nil { logger.Error("CronService have error", logger.Any("err", r)) } }() runValue := true skip := 0 for runValue { total, err := GetUnknownTxsByPage(skip, 20) if err != nil { logger.Error("GetUnknownTxsByPage have error", logger.String("err", err.Error())) } if total < 20 { runValue = false logger.Debug("finish GetUnknownTxsByPage.") } else { skip = skip + total logger.Debug("continue GetUnknownTxsByPage", logger.Int("skip", skip)) } } logger.Debug("finish update txs.") } fn_update() for { select { case <-ticker.C: fn_update() case <-stop: close(stop) logger.Debug(" CronService Quit...") return } } } go fn() } func GetUnknownTxsByPage(skip, limit int) (int, error) { var res []model.IrisTx q := bson.M{"status": "unknown"} sorts := []string{"-height"} fn := func(c *mgo.Collection) error { return c.Find(q).Sort(sorts...).Skip(skip).Limit(limit).All(&res) } if err := db.ExecCollection(model.CollectionNameIrisTx, fn); err != nil { return 0, err } if len(res) > 0 { doWork(res) } return len(res), nil } func GetCoinFlowByHash(txhash string) ([]model.IrisAssetDetail, error) { var res []model.IrisAssetDetail q := bson.M{"tx_hash": txhash} sorts := []string{"-height"} fn := func(c *mgo.Collection) error { return c.Find(q).Sort(sorts...).All(&res) } if err := 
db.ExecCollection(model.CollectionNameAssetDetail, fn); err != nil { return nil, err } return res, nil } func doWork(iristxs []model.IrisTx) { client := helper.GetClient() defer func() { client.Release() }() for _, val := range iristxs { txs, err := ParseUnknownTxs(val.Height, client) if err != nil { continue } if err := UpdateUnknowTxs(txs); err != nil { logger.Warn("UpdateUnknowTxs have error", logger.String("error", err.Error())) } if err := UpdateCoinFlow(val.TxHash, val.Height, client); err != nil { logger.Warn("UpdateCoinFlow have error", logger.String("error", err.Error())) } } } func ParseUnknownTxs(height int64, client *helper.Client) (resIrisTxs []*model.IrisTx, err error) { var irisBlock block.Iris_Block resIrisTxs, err = irisBlock.ParseIrisTxs(height, client) if err != nil { logger.Error("Parse block txs fail", logger.Int64("block", height), logger.String("err", err.Error())) } return } func ParseCoinflows(height int64, client *helper.Client) (coinflows []*model.IrisAssetDetail, err error) { var irisBlock block.Iris_Block coinflows, err = irisBlock.ParseIrisAssetDetail(height, client) if err != nil { logger.Error("Parse block coinflow fail", logger.Int64("block", height), logger.String("err", err.Error())) } return } func UpdateUnknowTxs(iristx []*model.IrisTx) error
func UpdateCoinFlow(txhash string, height int64, client *helper.Client) error { coinflows, err := GetCoinFlowByHash(txhash) if err != nil { return err } var ops []txn.Op if len(coinflows) > 0 { return fmt.Errorf("coinflow not need to update") } assetdetail, err := ParseCoinflows(height, client) for _, dbval := range assetdetail { ops = append(ops, txn.Op{ C: model.CollectionNameAssetDetail, Id: bson.NewObjectId(), Insert: dbval, }) } if len(ops) > 0 { err := db.Txn(ops) if err != nil { return err } } return nil }
{ update_fn := func(tx *model.IrisTx) error { fn := func(c *mgo.Collection) error { return c.Update(bson.M{"tx_hash": tx.TxHash}, bson.M{"$set": bson.M{"actual_fee": tx.ActualFee, "status": tx.Status, "tags": tx.Tags}}) } if err := db.ExecCollection(model.CollectionNameIrisTx, fn); err != nil { return err } return nil } for _, dbval := range iristx { update_fn(dbval) } return nil }
request.rs
use std::fmt; use anyhow::Error; use crate::cmd::{ Cmd, CtrRequest, Mode, SetBrightness, TemporaryOn, CH_INDEX, CMD_INDEX, CRC_INDEX, MESSAGE_LENGTH, };
const ST: u8 = 171; const SP: u8 = 172; const RES: u8 = 0; #[derive(Debug, Clone, Copy, Default)] pub struct Request { pub mode: Mode, pub ctr: CtrRequest, pub ch: u8, pub cmd: Cmd, pub id: u32, } impl Request { pub fn ch(&self) -> u8 { self.ch } pub fn set_ch(&mut self, ch: u8) -> Result<(), Error> { ensure!(ch < 64, "The ch value must be between 0 and 63"); self.ch = ch; Ok(()) } pub fn to_message(self) -> [u8; MESSAGE_LENGTH] { let mut msg = [0; MESSAGE_LENGTH]; msg[0] = ST; msg[1] = self.mode as u8; msg[2] = self.ctr as u8; msg[3] = RES; msg[CH_INDEX] = self.ch; msg[CMD_INDEX] = self.cmd.as_u8(); match self.cmd { Cmd::SetBrightness(br) => match br { SetBrightness::Fmt1(d0) => { msg[6] = 1; msg[7] = d0; } SetBrightness::Fmt3(d) => { msg[6] = 3; msg[7] = d[0]; msg[8] = d[1]; msg[9] = d[2]; } }, Cmd::BrightReg(reg) => { msg[6] = 1; msg[7] = reg; } Cmd::TemporaryOn(tem) => match tem { TemporaryOn::Fmt1(d0) => { msg[6] = 1; msg[7] = d0; } TemporaryOn::Fmt2(d) => { msg[6] = 2; msg[7] = d[0]; msg[8] = d[1]; } }, Cmd::Service(serv) => { msg[7] = if serv { 1 } else { 0 }; } Cmd::ClearMemory => { msg[6] = 4; msg[7] = 170; msg[8] = 85; msg[9] = 170; msg[10] = 85; } Cmd::Off | Cmd::BrightDown | Cmd::On | Cmd::BrightUp | Cmd::Switch | Cmd::BrightBack | Cmd::LoadPreset | Cmd::SavePreset | Cmd::Unbind | Cmd::StopBright | Cmd::BrightStepDown | Cmd::BrightStepUp | Cmd::Bind | Cmd::RollColor | Cmd::SwitchColor | Cmd::SwitchMode | Cmd::SpeedMode | Cmd::BatteryLow | Cmd::SensTempHumi | Cmd::Modes | Cmd::ReadState | Cmd::WriteState | Cmd::SendState => { // default parameters } } let mut idx = 11; for b in self.id.to_le_bytes().iter() { msg[idx] = *b; idx += 1; } let mut sum: u32 = 0; for byte in msg.iter().take(15) { sum += *byte as u32; } msg[CRC_INDEX] = sum.to_le_bytes()[0]; msg[16] = SP; msg } } impl fmt::Display for Request { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "ST:171 MODE:{} CTR:{} CH:{} CMD:{} ID:{} SP:172", self.mode, self.ctr, self.ch, 
self.cmd, self.id ) } } pub fn set_mode(md: Mode) -> Request { Request { mode: md, ..Default::default() } } pub fn bind(md: Mode, ch: u8) -> Request { Request { mode: md, ctr: CtrRequest::BindModeOn, cmd: Cmd::Bind, ch, ..Default::default() } } #[cfg(test)] mod test { use crate::cmd::request::Request; use crate::cmd::*; #[test] pub fn test_crc() { let mut req = Request::default(); req.mode = Mode::TxF; req.set_ch(5).unwrap(); req.cmd = Cmd::Bind; assert_eq!( [171, 2, 0, 0, 5, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 193, 172], req.to_message() ); req.cmd = Cmd::Service(true); assert_eq!( [171, 2, 0, 0, 5, 131, 0, 1, 0, 0, 0, 0, 0, 0, 0, 54, 172], req.to_message() ); } }
construct_sagemaker_role.py
# ***************************************************************************** # * Copyright 2020 Amazon.com, Inc. and its affiliates. All Rights Reserved. * # * # Licensed under the Amazon Software License (the "License"). * # You may not use this file except in compliance with the License. * # A copy of the License is located at * # * # http://aws.amazon.com/asl/ * # * # or in the "license" file accompanying this file. This file is distributed * # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * # express or implied. See the License for the specific language governing * # permissions and limitations under the License. * # ***************************************************************************** from aws_cdk import aws_iam, core from aws_cdk.aws_iam import IManagedPolicy, ServicePrincipal class ConstructSageMakerRole(aws_iam.Role): """ Custom SageMaker role construct , with minimum permissions required to run the preprocessor """ def
(self, scope: core.Construct, id: str, managed_policy: IManagedPolicy, role_name: str = None): # S3 Bucket for SageMaker internal access s3_sagemaker_bucket_access = aws_iam.PolicyDocument( statements=[ # S3 SageMaker Internal access aws_iam.PolicyStatement(actions=["s3:GetObject", "s3:PutObject", "s3:ListBucket"], resources=["arn:aws:s3:::*sagemaker*"]) ] ) # SageMaker Cloud Watch Access cloudwatch_access = aws_iam.PolicyDocument( statements=[aws_iam.PolicyStatement(actions=["cloudwatch:PutMetricData", "cloudwatch:GetMetricData", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:DescribeLogStreams", "logs:PutLogEvents", "logs:GetLogEvents"], resources=["*"]) ]) super().__init__(scope, id, assumed_by=ServicePrincipal("sagemaker.amazonaws.com"), description="The sagemaker role to access the data and ecr", inline_policies={ "S3SageMakerBucketAccess": s3_sagemaker_bucket_access, "CloudWatchAccess": cloudwatch_access }, managed_policies=[managed_policy], role_name=role_name )
__init__
Profile.js
import React, { useEffect, useState } from "react"; import { Link } from "react-router-dom"; import { Badge, Button, Card, CardBody, CardHeader, CardTitle, Col, Container, DropdownItem, DropdownMenu, DropdownToggle, Media, Row, UncontrolledDropdown } from "reactstrap"; import { Briefcase, Home, MapPin, MessageSquare, MoreHorizontal } from "react-feather"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import { faGlobe } from "@fortawesome/free-solid-svg-icons"; import { faHeart } from "@fortawesome/free-regular-svg-icons"; import { faFacebook, faInstagram, faLinkedin, faTwitter } from "@fortawesome/free-brands-svg-icons"; import avatar1 from "../../assets/img/avatars/avatar.jpg"; import avatar2 from "../../assets/img/avatars/avatar-2.jpg"; import avatar4 from "../../assets/img/avatars/avatar-4.jpg"; import avatar5 from "../../assets/img/avatars/avatar-5.jpg"; import unsplash1 from "../../assets/img/photos/unsplash-1.jpg"; import unsplash2 from "../../assets/img/photos/unsplash-2.jpg"; import UserApi from '../../api/UserApi'; const ProfileDetails = () => { const [userInfo, setUserInfo] = useState({}); useEffect(() => { const getProfile = async () => { const result = await UserApi.getProfile(); // TODO update storage // TODO update redux setUserInfo(result); } getProfile(); }, []); return ( <Card> <CardHeader> <CardTitle tag="h5" className="mb-0"> Profile Details </CardTitle> </CardHeader> <CardBody className="text-center"> <img src={userInfo.avatarUrl ? 
`http://127.0.0.1:8887/Avatar/${userInfo.avatarUrl}` : avatar2} alt={userInfo.firstName + " " + userInfo.lastName} className="img-fluid rounded-circle mb-2" width="128" height="128" /> <CardTitle tag="h5" className="mb-0"> {userInfo.firstName + " " + userInfo.lastName} </CardTitle> <div className="text-muted mb-2">{userInfo.role}</div> <div> <Button size="sm" color="primary" className="mr-1"> Follow </Button> <Button size="sm" color="primary"> <MessageSquare width={16} height={16} /> Message </Button> </div> </CardBody> <hr className="my-0" /> <CardBody> <CardTitle tag="h5">Skills</CardTitle> <Badge color="primary" className="mr-1 my-1"> HTML </Badge> <Badge color="primary" className="mr-1 my-1"> JavaScript </Badge> <Badge color="primary" className="mr-1 my-1"> Sass </Badge> <Badge color="primary" className="mr-1 my-1"> Angular </Badge> <Badge color="primary" className="mr-1 my-1"> Vue </Badge> <Badge color="primary" className="mr-1 my-1"> React </Badge> <Badge color="primary" className="mr-1 my-1"> Redux </Badge> <Badge color="primary" className="mr-1 my-1"> UI </Badge> <Badge color="primary" className="mr-1 my-1"> UX </Badge> </CardBody> <hr className="my-0" /> <CardBody> <CardTitle tag="h5">About</CardTitle> <ul className="list-unstyled mb-0"> <li className="mb-1"> <Home width={14} height={14} className="mr-1" /> Lives in{" "} <Link to="/dashboard/default">San Francisco, SA</Link> </li> <li className="mb-1"> <Briefcase width={14} height={14} className="mr-1" /> Works at{" "} <Link to="/dashboard/default">GitHub</Link> </li> <li className="mb-1"> <MapPin width={14} height={14} className="mr-1" /> From{" "} <Link to="/dashboard/default">Boston</Link> </li> </ul> </CardBody> <hr className="my-0" /> <CardBody> <CardTitle tag="h5">Elsewhere</CardTitle> <ul className="list-unstyled mb-0"> <li className="mb-1"> <FontAwesomeIcon icon={faGlobe} fixedWidth className="mr-1" /> <Link to="/dashboard/default">staciehall.co</Link> </li> <li className="mb-1"> <FontAwesomeIcon 
icon={faTwitter} fixedWidth className="mr-1" /> <Link to="/dashboard/default">Twitter</Link> </li> <li className="mb-1"> <FontAwesomeIcon icon={faFacebook} fixedWidth className="mr-1" /> <Link to="/dashboard/default">Facebook</Link> </li> <li className="mb-1"> <FontAwesomeIcon icon={faInstagram} fixedWidth className="mr-1" /> <Link to="/dashboard/default">Instagram</Link> </li> <li className="mb-1"> <FontAwesomeIcon icon={faLinkedin} fixedWidth className="mr-1" /> <Link to="/dashboard/default">LinkedIn</Link> </li> </ul> </CardBody> </Card> ) }; const Activities = () => ( <Card> <CardHeader> <div className="card-actions float-right"> <UncontrolledDropdown> <DropdownToggle tag="a"> <MoreHorizontal /> </DropdownToggle> <DropdownMenu right> <DropdownItem>Action</DropdownItem> <DropdownItem>Another Action</DropdownItem> <DropdownItem>Something else here</DropdownItem> </DropdownMenu> </UncontrolledDropdown> </div> <CardTitle tag="h5" className="mb-0"> Activities </CardTitle> </CardHeader> <CardBody> <Media> <img src={avatar5} width="36" height="36" className="rounded-circle mr-2" alt="Ashley Briggs" /> <Media body> <small className="float-right text-navy">5m ago</small> <strong>Ashley Briggs</strong> started following{" "} <strong>Stacie Hall</strong> <br /> <small className="text-muted">Today 7:51 pm</small> <br /> </Media> </Media> <hr /> <Media> <img src={avatar1} width="36" height="36" className="rounded-circle mr-2" alt="Chris Wood" /> <Media body> <small className="float-right text-navy">30m ago</small> <strong>Chris Wood</strong> posted something on{" "} <strong>Stacie Hall</strong>'s timeline <br /> <small className="text-muted">Today 7:21 pm</small> <div className="border text-sm text-muted p-2 mt-1"> Etiam rhoncus. Maecenas tempus, tellus eget condimentum rhoncus, sem quam semper libero, sit amet adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et ante tincidunt tempus. 
Donec vitae sapien ut libero venenatis faucibus. Nullam quis ante. </div> <Button size="sm" color="danger" className="mt-1"> <FontAwesomeIcon icon={faHeart} fixedWidth /> Like </Button> </Media> </Media> <hr /> <Media> <img src={avatar4} width="36" height="36" className="rounded-circle mr-2" alt="Stacie Hall" /> <Media body> <small className="float-right text-navy">1h ago</small> <strong>Stacie Hall</strong> posted a new blog <br /> <small className="text-muted">Today 6:35 pm</small> </Media> </Media> <hr /> <Media> <img src={avatar2} width="36" height="36" className="rounded-circle mr-2" alt="Carl Jenkins" /> <Media body> <small className="float-right text-navy">3h ago</small> <strong>Carl Jenkins</strong> posted two photos on{" "} <strong>Stacie Hall</strong>'s timeline <br /> <small className="text-muted">Today 5:12 pm</small> <div className="row no-gutters mt-1"> <div className="col-6 col-md-4 col-lg-4 col-xl-3"> <img src={unsplash1} className="img-fluid pr-2" alt="Unsplash" /> </div> <div className="col-6 col-md-4 col-lg-4 col-xl-3"> <img src={unsplash2} className="img-fluid pr-2" alt="Unsplash" /> </div> </div> <Button size="sm" color="danger" className="mt-1"> <FontAwesomeIcon icon={faHeart} fixedWidth /> Like </Button> </Media> </Media> <hr /> <Media> <img src={avatar2} width="36" height="36" className="rounded-circle mr-2" alt="Carl Jenkins" /> <Media body> <small className="float-right text-navy">1d ago</small> <strong>Carl Jenkins</strong> started following{" "} <strong>Stacie Hall</strong> <br /> <small className="text-muted">Yesterday 3:12 pm</small> <Media className="mt-1"> <img src={avatar4} width="36" height="36" className="rounded-circle mr-2" alt="Stacie Hall" /> <Media body className="pl-3"> <div className="border text-sm text-muted p-2 mt-1"> Nam quam nunc, blandit vel, luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et ante tincidunt tempus. 
</div> </Media> </Media> </Media> </Media> <hr /> <Media> <img src={avatar4} width="36" height="36" className="rounded-circle mr-2" alt="Stacie Hall" /> <Media body> <small className="float-right text-navy">1d ago</small> <strong>Stacie Hall</strong> posted a new blog <br /> <small className="text-muted">Yesterday 2:43 pm</small> </Media> </Media> <hr /> <Media> <img src={avatar1} width="36" height="36" className="rounded-circle mr-2" alt="Chris Wood" /> <Media body> <small className="float-right text-navy">1d ago</small> <strong>Chris Wood</strong> started following{" "} <strong>Stacie Hall</strong> <br /> <small className="text-muted">Yesterdag 1:51 pm</small> </Media> </Media> <hr /> <Button color="primary" block> Load more </Button> </CardBody> </Card> ); const Profile = () => ( <Container fluid className="p-0"> <h1 className="h3 mb-3">Profile</h1> <Row> <Col md="4" xl="3">
</Col> </Row> </Container> ); export default Profile;
<ProfileDetails /> </Col> <Col md="8" xl="9"> <Activities />
output_csv.go
package trdsql import ( "encoding/csv" ) // CSVWriter provides methods of the Writer interface. type CSVWriter struct { writer *csv.Writer results []string outHeader bool } // NewCSVWriter returns CSVWriter. func NewCSVWriter(writeOpts *WriteOpts) *CSVWriter { var err error w := &CSVWriter{} w.writer = csv.NewWriter(writeOpts.OutStream) w.writer.Comma, err = delimiter(writeOpts.OutDelimiter) if err != nil { debug.Printf("%s\n", err) } w.outHeader = writeOpts.OutHeader return w } // PreWrite is output of header and preparation. func (w *CSVWriter) PreWrite(columns []string, types []string) error { if w.outHeader
w.results = make([]string, len(columns)) return nil } // WriteRow is row write. func (w *CSVWriter) WriteRow(values []interface{}, columns []string) error { for i, col := range values { w.results[i] = ValString(col) } err := w.writer.Write(w.results) return err } // PostWrite is flush. func (w *CSVWriter) PostWrite() error { w.writer.Flush() return nil }
{ err := w.writer.Write(columns) if err != nil { return err } }
rest.js
/** * Lo-Dash 2.4.1 (Custom Build) <http://lodash.com/> * Build: `lodash modularize exports="amd" -o ./compat/` * Copyright 2012-2013 The Dojo Foundation <http://dojofoundation.org/> * Based on Underscore.js 1.5.2 <http://underscorejs.org/LICENSE> * Copyright 2009-2013 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors * Available under MIT license <http://lodash.com/license> */ define(['../functions/createCallback', '../internals/slice'], function(createCallback, slice) { /* Native method shortcuts for methods with the same name as other `lodash` methods */ var nativeMax = Math.max; /** * The opposite of `_.initial` this method gets all but the first element or * first `n` elements of an array. If a callback function is provided elements * at the beginning of the array are excluded from the result as long as the * callback returns truey. The callback is bound to `thisArg` and invoked * with three arguments; (value, index, array). * * If a property name is provided for `callback` the created "_.pluck" style * callback will return the property value of the given element. * * If an object is provided for `callback` the created "_.where" style callback * will return `true` for elements that have the properties of the given object, * else `false`. * * @static * @memberOf _ * @alias drop, tail * @category Arrays * @param {Array} array The array to query. * @param {Function|Object|number|string} [callback=1] The function called * per element or the number of elements to exclude. If a property name or * object is provided it will be used to create a "_.pluck" or "_.where" * style callback, respectively. * @param {*} [thisArg] The `this` binding of `callback`. * @returns {Array} Returns a slice of `array`. * @example *
* // => [3] * * _.rest([1, 2, 3], function(num) { * return num < 3; * }); * // => [3] * * var characters = [ * { 'name': 'barney', 'blocked': true, 'employer': 'slate' }, * { 'name': 'fred', 'blocked': false, 'employer': 'slate' }, * { 'name': 'pebbles', 'blocked': true, 'employer': 'na' } * ]; * * // using "_.pluck" callback shorthand * _.pluck(_.rest(characters, 'blocked'), 'name'); * // => ['fred', 'pebbles'] * * // using "_.where" callback shorthand * _.rest(characters, { 'employer': 'slate' }); * // => [{ 'name': 'pebbles', 'blocked': true, 'employer': 'na' }] */ function rest(array, callback, thisArg) { if (typeof callback != 'number' && callback != null) { var n = 0, index = -1, length = array ? array.length : 0; callback = createCallback(callback, thisArg, 3); while (++index < length && callback(array[index], index, array)) { n++; } } else { n = (callback == null || thisArg) ? 1 : nativeMax(0, callback); } return slice(array, n); } return rest; });
* _.rest([1, 2, 3]); * // => [2, 3] * * _.rest([1, 2, 3], 2);
iocReleaseCreateDb.py
#!/usr/bin/env python import sys import os import subprocess import optparse __all__ = ['export_db_file', 'module_versions', 'process_options'] def export_db_file(module_versions, path=None):
def module_versions(release_path, site_path): """ Return a dictionary containing module names and versions. """ # first grab EPICS_BASE_VER from RELEASE_SITE file, if it's there siteBaseVer = "Nada" openSiteFile = 1 try: site_file = open(site_path, 'r') except IOError, e: #sys.stderr.write('Could not open "%s": %s\n' % (site_path, e.strerror)) openSiteFile = 0 if openSiteFile: for line in site_file: # Remove comments line = line.partition('#')[0] # Turn 'a = b' into a key/value pair and remove leading and trailing whitespace (key, sep, value) = line.partition('=') key = key.strip() value = value.strip() # save EPICS_BASE_VER, if it's in there if key.startswith('EPICS_BASE_VER'): siteBaseVer = value break site_file.close() # now get all the modules try: release_file = open(release_path, 'r') except IOError, e: sys.stderr.write('Could not open "%s": %s\n' % (release_path, e.strerror)) return None release_file_dict = {} for line in release_file: # Remove comments line = line.partition('#')[0] # Turn 'a = b' into a key/value pair and remove leading and trailing whitespace (key, sep, value) = line.partition('=') key = key.strip() value = value.strip() # Add the key/value pair to the dictionary if the key ends with _MODULE_VERSION if key.endswith('_MODULE_VERSION'): # if BASE_MODULE_VERSION is set to EPICS_BASE_VER macro from RELEASE_SITE, # capture it here if key == "BASE_MODULE_VERSION" and value == "$(EPICS_BASE_VER)": if siteBaseVer != "Nada": release_file_dict[key] = siteBaseVer else: # don't set BASE at all pass else: release_file_dict[key] = value release_file.close() return release_file_dict def process_options(argv): """ Return parsed command-line options found in the list of arguments, `argv`, or ``sys.argv[2:]`` if `argv` is `None`. 
""" if argv is None: argv = sys.argv[1:] # usage = 'Usage: %prog RELEASE_FILE [options]' usage = 'Usage: %prog RELEASE_FILE RELEASE_SITE_FILE [options]' version = '%prog 0.1' parser = optparse.OptionParser(usage=usage, version=version) parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='print verbose output') parser.add_option("-e", "--db_file", action="store", type="string", dest="db_file", metavar="FILE", help="module database file path") parser.set_defaults(verbose=False, db_file=None) (options, args) = parser.parse_args(argv) if len(args) != 2: parser.error("incorrect number of arguments") options.release_file_path = os.path.normcase(args[0]) options.release_site_file_path = os.path.normcase(args[1]) return options def main(argv=None): options = process_options(argv) versions = module_versions(options.release_file_path, options.release_site_file_path) export_db_file(versions, options.db_file) return 0 if __name__ == '__main__': status = main() sys.exit(status)
""" Use the contents of a dictionary of module versions to create a database of module release stringin PVs. The database is written to stdout if path is not provided or is None. """ out_file = sys.stdout idx = 0 idxMax = 20 if path: try: out_file = open(path, 'w') except IOError, e: sys.stderr.write('Could not open "%s": %s\n' % (path, e.strerror)) return None sorted_module_versions = [(key, module_versions[key]) for key in sorted(module_versions.keys())] print >> out_file, '#==============================================================================' print >> out_file, '#' print >> out_file, '# Abs: LCLS read-only stringin records for Modules specified in configure/RELEASE' print >> out_file, '#' print >> out_file, '# Name: iocRelease.db' print >> out_file, '#' print >> out_file, '# Note: generated automatically by $IOCADMIN/bin/$EPICS_HOST_ARCH/iocReleaseCreateDb.py' print >> out_file, '#' print >> out_file, '#==============================================================================' for [key, module_version] in sorted_module_versions: """ strip off the _MODULE_VERSION from key for PV NAME """ x = key.replace("_MODULE_VERSION","",1) if idx >= idxMax: break print >> out_file, 'record(stringin, "$(IOC):RELEASE%02d") {' % idx print >> out_file, ' field(DESC, "%s")' % x print >> out_file, ' field(PINI, "YES")' print >> out_file, ' field(VAL, "%s")' % module_version print >> out_file, ' #field(ASG, "some read only grp")' print >> out_file, '}' idx = idx + 1 while idx < idxMax: print >> out_file, 'record(stringin, "$(IOC):RELEASE%02d") {' % idx print >> out_file, ' field(DESC, "Not Applicable")' print >> out_file, ' field(PINI, "YES")' print >> out_file, ' field(VAL, "Not Applicable")' print >> out_file, ' #field(ASG, "some read only grp")' print >> out_file, '}' idx = idx + 1 if out_file != sys.stdout: out_file.close()
logquant_v1.py
""" Author: CAI JINGYONG @ BeatCraft, Inc & Tokyo University of Agriculture and Technology placeholder input: numpy array output: numpy array """ import numpy class LogQuant: def __init__(self,layer,bitwidth): self.layer_data = layer self.width = bitwidth self.maxima = numpy.amax(layer) self.minima = numpy.amin(layer) self.fsr = self.maxima - self.minima self.sign = numpy.sign(layer) pass def __clip(self, x): # min = self.fsr-(2**self.width) min = 4 - (2**self.width) if(x <= min): return 0 elif(x >= 4): return 4 - 1
else: return x def __round(self,x): bridge = numpy.sqrt(2)-1 decimalpart, intpart = numpy.modf(x) if decimalpart >= bridge: return numpy.ceil(x) else: return numpy.floor(x) @property def log_quantize(self): round = numpy.vectorize(self.__round) clip = numpy.vectorize(self.__clip) # numpy.log2(0) -> -infinity == float("-inf") which will be used in clip method return numpy.array(clip(round(numpy.log2(abs(self.layer_data)))),dtype=numpy.int8) @property def de_quantize(self): x = numpy.power(2.0, self.log_quantized) return x * self.sign
pqueue.ts
export interface PQueueComparator<T> { (a: T, b: T): number } export default class PQueue<T> { contents: T[] private _sorted: boolean private _comparator: PQueueComparator<T> private _sort (): void { if (!this._sorted) { this.contents.sort(this._comparator) this._sorted = true } } constructor (comparator: PQueueComparator<T>) { this._comparator = comparator this.contents = [] this._sorted = false }
push (item: T): void { this.contents.push(item) this._sorted = false } peek (index?: number): T { this._sort() index = typeof index === 'number' ? index : this.contents.length - 1 return this.contents[index] } pop () { this._sort() return this.contents.pop() } size (): number { return this.contents.length } map<U> (mapper: (item: T, index: number) => any): U[] { this._sort() return this.contents.map(mapper) } }
doc.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Compile, typically invoked as ``go tool compile,'' compiles a single Go package comprising the files named on the command line. It then writes a single object file named for the basename of the first source file with a .o suffix. The object file can then be combined with other objects into a package archive or passed directly to the linker (``go tool link''). If invoked with -pack, the compiler writes an archive directly, bypassing the intermediate object file. The generated files contain type information about the symbols exported by the package and about types used by symbols imported by the package from
package P to read the files of P's dependencies, only the compiled output of P. Command Line Usage: go tool compile [flags] file... The specified files must be Go source files and all part of the same package. The same compiler is used for all target operating systems and architectures. The GOOS and GOARCH environment variables set the desired target. Flags: -D path Set relative path for local imports. -I dir1 -I dir2 Search for imported packages in dir1, dir2, etc, after consulting $GOROOT/pkg/$GOOS_$GOARCH. -L Show complete file path in error messages. -N Disable optimizations. -S Print assembly listing to standard output (code only). -S -S Print assembly listing to standard output (code and data). -V Print compiler version and exit. -asmhdr file Write assembly header to file. -complete Assume package has no non-Go components. -cpuprofile file Write a CPU profile for the compilation to file. -dynlink Allow references to Go symbols in shared libraries (experimental). -e Remove the limit on the number of errors reported (default limit is 10). -h Halt with a stack trace at the first error detected. -importmap old=new Interpret import "old" as import "new" during compilation. The option may be repeated to add multiple mappings. -installsuffix suffix Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix instead of $GOROOT/pkg/$GOOS_$GOARCH. -largemodel Generated code that assumes a large memory model. -memprofile file Write memory profile for the compilation to file. -memprofilerate rate Set runtime.MemProfileRate for the compilation to rate. -msan Insert calls to C/C++ memory sanitizer. -nolocalimports Disallow local (relative) imports. -o file Write object to file (default file.o or, with -pack, file.a). -p path Set expected package import path for the code being compiled, and diagnose imports that would cause a circular dependency. -pack Write a package (archive) file rather than an object file -race Compile with race detector enabled. 
-u Disallow importing packages not marked as safe; implies -nolocalimports. There are also a number of debugging flags; run the command with no arguments for a usage message. Compiler Directives The compiler accepts compiler directives in the form of // comments at the beginning of a line. To distinguish them from non-directive comments, the directives require no space between the slashes and the name of the directive. However, since they are comments, tools unaware of the directive convention or of a particular directive can skip over a directive like any other comment. //line path/to/file:linenumber The //line directive specifies that the source line that follows should be recorded as having come from the given file path and line number. Successive lines are recorded using increasing line numbers, until the next directive. This directive typically appears in machine-generated code, so that compilers and debuggers will show lines in the original input to the generator. The //line directive is an historical special case; all other directives are of the form //go:name, indicating that the directive is defined by the Go toolchain. //go:noescape The //go:noescape directive specifies that the next declaration in the file, which must be a func without a body (meaning that it has an implementation not written in Go) does not allow any of the pointers passed as arguments to escape into the heap or into the values returned from the function. This information can be used during the compiler's escape analysis of Go code calling the function. //go:nosplit The //go:nosplit directive specifies that the next function declared in the file must not include a stack overflow check. This is most commonly used by low-level runtime sources invoked at times when it is unsafe for the calling goroutine to be preempted. 
//go:linkname localname importpath.name The //go:linkname directive instructs the compiler to use ``importpath.name'' as the object file symbol name for the variable or function declared as ``localname'' in the source code. Because this directive can subvert the type system and package modularity, it is only enabled in files that have imported "unsafe". */ package main
other packages. It is therefore not necessary when compiling client C of
OrangeAutoSave.js
/*=============================================================================
 * Orange - AutoSave
 * By Hudell - www.hudell.com
 * OrangeAutoSave.js
 * Version: 1.1.1
 * Free for commercial and non commercial use.
 *=============================================================================*/
/*:
 * @plugindesc Automatically save the game on map change <OrangeAutoSave>
 * @author Hudell
 *
 * @param saveSlot
 * @desc Set this to the number of the slot where the plugin should save
 * @default 1
 *
 * @param saveOnPluginTransfer
 * @desc save game automatically on any kind of player transfer
 * @default false
 *
 * @param saveOnTransferCommand
 * @desc save game automatically when the "transfer player" command is used
 * @default true
 *
 * @param autoSaveSlot
 * @desc Instead of using the saveSlot param, the plugin will pick the last used slot
 * @default false
 *
 * @help
 * ============================================================================
 * Hudell's Plugins
 * ============================================================================
 *
 * Check out my website:
 * http://hudell.com
 *
 * ============================================================================
 *
 * You only need to enable saveOnPluginTransfer if you have other plugins
 * that transfer the player and you want the game to be saved on those
 * transfers too.
 *
 * When you enable it, this plugin will have to change the "new game" and
 * "load game" commands to make sure the game isn't autosaved by them too.
 *
 * ============================================================================
 *
 * You can trigger an auto save with the following script call:
 *
 * DataManager.autoSave();
 *
 *=============================================================================*/
var Imported = Imported || {};
var Hudell = Hudell || {};
Hudell.OrangeAutoSave = Hudell.OrangeAutoSave || {};

(function($) {
  "use strict";

  // Locate this plugin's parameters via the <OrangeAutoSave> tag in the
  // plugin description (the registered plugin name may differ).
  var parameters = $plugins.filter(function(plugin) {
    return plugin.description.contains('<OrangeAutoSave>');
  });
  if (parameters.length === 0) {
    throw new Error("Couldn't find Hudell's OrangeAutoSave parameters.");
  }
  $.Parameters = parameters[0].parameters;
  $.Param = {};

  $.enabled = true;
  // Count of imminent transfers that must NOT trigger an auto save
  // (new game / load game).
  $.skipCalls = 0;

  // Param validation
  // Setting saveSlot to the literal string "false" disables the configured
  // slot and falls back to slot 99.
  if ($.Parameters.saveSlot !== "false") {
    $.Param.saveSlot = Number($.Parameters.saveSlot || 1);
  } else {
    $.Param.saveSlot = 99;
  }
  $.Param.saveOnPluginTransfer = ($.Parameters.saveOnPluginTransfer || "").toLowerCase() === "true";
  $.Param.saveOnTransferCommand = ($.Parameters.saveOnTransferCommand || "").toLowerCase() !== "false";
  // Bug fix: this previously used `!== "false"`, which made an *unset*
  // parameter default to true even though the declared @default is false.
  // Parse it opt-in, like saveOnPluginTransfer, so the documented default
  // holds. (Projects where the editor wrote "false"/"true" explicitly are
  // unaffected.)
  $.Param.autoSaveSlot = ($.Parameters.autoSaveSlot || "").toLowerCase() === "true";

  $.Param.currentSaveSlot = $.Param.saveSlot;

  // Code

  // Returns the slot auto saves go to (tracks the last used slot when
  // autoSaveSlot is enabled).
  $.getSaveSlot = function() {
    return $.Param.currentSaveSlot;
  };

  // Suppress the auto save for the next performTransfer call.
  $.skipNextCall = function() {
    $.skipCalls++;
  };

  // Perform the auto save immediately, without the rescue dialog.
  $.doAutoSave = function() {
    $gameSystem.onBeforeSave();
    DataManager.saveGameWithoutRescue($.getSaveSlot());
  };

  //Only change the performTransfer method if it's activated through params
  if ($.Param.saveOnPluginTransfer) {
    $.oldGamePlayer_performTransfer = Game_Player.prototype.performTransfer;
    Game_Player.prototype.performTransfer = function() {
      $.oldGamePlayer_performTransfer.call(this);

      if ($.skipCalls > 0) {
        $.skipCalls--;
        return;
      }

      if (this._newMapId > 0) {
        if ($.enabled) {
          $.doAutoSave();
        }
      }
    };

    //Changes setupNewGame so that the initial player transfer doesn't trigger an auto save
    $.oldDataManager_setupNewGame = DataManager.setupNewGame;
    DataManager.setupNewGame = function() {
      $.skipNextCall();
      $.oldDataManager_setupNewGame.call(this);
    };

    //Changes reloadMapIfUpdated so that loading a game doesn't trigger an auto save
    $.oldSceneLoad_reloadMapIfUpdated = Scene_Load.prototype.reloadMapIfUpdated;
    Scene_Load.prototype.reloadMapIfUpdated = function() {
      if ($gameSystem.versionId() !== $dataSystem.versionId) {
        $.skipNextCall();
      }
      $.oldSceneLoad_reloadMapIfUpdated.call(this);
    };

  //Only change the command if the performTransfer is disabled and the transfer command is enabled
  } else if ($.Param.saveOnTransferCommand) {
    $.oldGameInterpreter_command201 = Game_Interpreter.prototype.command201;
    Game_Interpreter.prototype.command201 = function() {
      $.oldGameInterpreter_command201.call(this);
      if ($gamePlayer.isTransferring() && $.enabled) {
        $.doAutoSave();
      }
    };
  }

  if ($.Param.autoSaveSlot) {
    // Track the last slot used by either saving or loading, so auto saves
    // follow the player's chosen slot.
    var oldDataManager_saveGameWithoutRescue = DataManager.saveGameWithoutRescue;
    DataManager.saveGameWithoutRescue = function(savefileId) {
      oldDataManager_saveGameWithoutRescue.call(this, savefileId);
      $.Param.currentSaveSlot = savefileId;
    };

    var oldDataManager_loadGameWithoutRescue = DataManager.loadGameWithoutRescue;
    DataManager.loadGameWithoutRescue = function(savefileId) {
      oldDataManager_loadGameWithoutRescue.call(this, savefileId);
      $.Param.currentSaveSlot = savefileId;
    };

    // A new game resets the tracked slot to the configured default.
    var autoSaveSlot_setupNewGame = DataManager.setupNewGame;
    DataManager.setupNewGame = function() {
      autoSaveSlot_setupNewGame.call(this);
      $.Param.currentSaveSlot = $.Param.saveSlot;
    };
  }

  // Public script-call entry point: DataManager.autoSave();
  DataManager.autoSave = $.doAutoSave;
})(Hudell.OrangeAutoSave);

// NOTE(review): header says Version 1.1.1 but this exports 1.1 — kept as-is
// because other plugins may compare against the numeric value; confirm intent.
Imported.OrangeAutoSave = 1.1;
error_meta.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum Error { ConflictException(crate::error::ConflictException), InternalServerException(crate::error::InternalServerException), ResourceNotFoundException(crate::error::ResourceNotFoundException), ServiceQuotaExceededException(crate::error::ServiceQuotaExceededException), ValidationException(crate::error::ValidationException), Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Error::ConflictException(inner) => inner.fmt(f), Error::InternalServerException(inner) => inner.fmt(f), Error::ResourceNotFoundException(inner) => inner.fmt(f), Error::ServiceQuotaExceededException(inner) => inner.fmt(f), Error::ValidationException(inner) => inner.fmt(f), Error::Unhandled(inner) => inner.fmt(f), } } } impl<R> From<smithy_http::result::SdkError<crate::error::AssociateAttributeGroupError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: smithy_http::result::SdkError<crate::error::AssociateAttributeGroupError, R>, ) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::AssociateAttributeGroupErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::AssociateAttributeGroupErrorKind::ResourceNotFoundException( inner, ) => Error::ResourceNotFoundException(inner), crate::error::AssociateAttributeGroupErrorKind::ServiceQuotaExceededException( inner, ) => Error::ServiceQuotaExceededException(inner), crate::error::AssociateAttributeGroupErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::AssociateAttributeGroupErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::AssociateResourceError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::AssociateResourceError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::AssociateResourceErrorKind::ConflictException(inner) => { Error::ConflictException(inner) } crate::error::AssociateResourceErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::AssociateResourceErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::AssociateResourceErrorKind::ServiceQuotaExceededException(inner) => { Error::ServiceQuotaExceededException(inner) } crate::error::AssociateResourceErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::CreateApplicationError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::CreateApplicationError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::CreateApplicationErrorKind::ConflictException(inner) => { Error::ConflictException(inner) } crate::error::CreateApplicationErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::CreateApplicationErrorKind::ServiceQuotaExceededException(inner) => { Error::ServiceQuotaExceededException(inner) } crate::error::CreateApplicationErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::CreateAttributeGroupError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: smithy_http::result::SdkError<crate::error::CreateAttributeGroupError, R>, ) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::CreateAttributeGroupErrorKind::ConflictException(inner) => { Error::ConflictException(inner) } crate::error::CreateAttributeGroupErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::CreateAttributeGroupErrorKind::ServiceQuotaExceededException( inner, ) => Error::ServiceQuotaExceededException(inner), crate::error::CreateAttributeGroupErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::CreateAttributeGroupErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::DeleteApplicationError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::DeleteApplicationError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::DeleteApplicationErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::DeleteApplicationErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::DeleteApplicationErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::DeleteApplicationErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::DeleteAttributeGroupError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn
( err: smithy_http::result::SdkError<crate::error::DeleteAttributeGroupError, R>, ) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::DeleteAttributeGroupErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::DeleteAttributeGroupErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::DeleteAttributeGroupErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::DeleteAttributeGroupErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::DisassociateAttributeGroupError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: smithy_http::result::SdkError<crate::error::DisassociateAttributeGroupError, R>, ) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::DisassociateAttributeGroupErrorKind::InternalServerException( inner, ) => Error::InternalServerException(inner), crate::error::DisassociateAttributeGroupErrorKind::ResourceNotFoundException( inner, ) => Error::ResourceNotFoundException(inner), crate::error::DisassociateAttributeGroupErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::DisassociateAttributeGroupErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::DisassociateResourceError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: smithy_http::result::SdkError<crate::error::DisassociateResourceError, R>, ) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::DisassociateResourceErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::DisassociateResourceErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::DisassociateResourceErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::GetApplicationError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::GetApplicationError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::GetApplicationErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::GetApplicationErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::GetApplicationErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::GetApplicationErrorKind::Unhandled(inner) => Error::Unhandled(inner), }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::GetAssociatedResourceError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: smithy_http::result::SdkError<crate::error::GetAssociatedResourceError, R>, ) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::GetAssociatedResourceErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::GetAssociatedResourceErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::GetAssociatedResourceErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::GetAssociatedResourceErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::GetAttributeGroupError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::GetAttributeGroupError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::GetAttributeGroupErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::GetAttributeGroupErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::GetAttributeGroupErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::GetAttributeGroupErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::ListApplicationsError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::ListApplicationsError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::ListApplicationsErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::ListApplicationsErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::ListApplicationsErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::ListAssociatedAttributeGroupsError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: smithy_http::result::SdkError<crate::error::ListAssociatedAttributeGroupsError, R>, ) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::ListAssociatedAttributeGroupsErrorKind::InternalServerException( inner, ) => Error::InternalServerException(inner), crate::error::ListAssociatedAttributeGroupsErrorKind::ResourceNotFoundException( inner, ) => Error::ResourceNotFoundException(inner), crate::error::ListAssociatedAttributeGroupsErrorKind::ValidationException( inner, ) => Error::ValidationException(inner), crate::error::ListAssociatedAttributeGroupsErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::ListAssociatedResourcesError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: smithy_http::result::SdkError<crate::error::ListAssociatedResourcesError, R>, ) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::ListAssociatedResourcesErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::ListAssociatedResourcesErrorKind::ResourceNotFoundException( inner, ) => Error::ResourceNotFoundException(inner), crate::error::ListAssociatedResourcesErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::ListAssociatedResourcesErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::ListAttributeGroupsError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::ListAttributeGroupsError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::ListAttributeGroupsErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::ListAttributeGroupsErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::ListAttributeGroupsErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::ListTagsForResourceError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::ListTagsForResourceError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::ListTagsForResourceErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::ListTagsForResourceErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::ListTagsForResourceErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::ListTagsForResourceErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::SyncResourceError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::SyncResourceError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::SyncResourceErrorKind::ConflictException(inner) => { Error::ConflictException(inner) } crate::error::SyncResourceErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::SyncResourceErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::SyncResourceErrorKind::Unhandled(inner) => Error::Unhandled(inner), }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::TagResourceError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::TagResourceError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::TagResourceErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::TagResourceErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::TagResourceErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::TagResourceErrorKind::Unhandled(inner) => Error::Unhandled(inner), }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::UntagResourceError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::UntagResourceError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::UntagResourceErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::UntagResourceErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::UntagResourceErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::UntagResourceErrorKind::Unhandled(inner) => Error::Unhandled(inner), }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::UpdateApplicationError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: smithy_http::result::SdkError<crate::error::UpdateApplicationError, R>) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::UpdateApplicationErrorKind::ConflictException(inner) => { Error::ConflictException(inner) } crate::error::UpdateApplicationErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::UpdateApplicationErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::UpdateApplicationErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<smithy_http::result::SdkError<crate::error::UpdateAttributeGroupError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: smithy_http::result::SdkError<crate::error::UpdateAttributeGroupError, R>, ) -> Self { match err { smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::UpdateAttributeGroupErrorKind::ConflictException(inner) => { Error::ConflictException(inner) } crate::error::UpdateAttributeGroupErrorKind::InternalServerException(inner) => { Error::InternalServerException(inner) } crate::error::UpdateAttributeGroupErrorKind::ResourceNotFoundException(inner) => { Error::ResourceNotFoundException(inner) } crate::error::UpdateAttributeGroupErrorKind::ValidationException(inner) => { Error::ValidationException(inner) } crate::error::UpdateAttributeGroupErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl std::error::Error for Error {}
from
form-editor.js
import React, {PureComponent} from 'react'; import PropTypes from 'prop-types'; import autobind from 'autobind-decorator'; import KeyValueEditor from '../../key-value-editor/editor'; import {trackEvent} from '../../../../common/analytics'; @autobind class
extends PureComponent { _handleTrackToggle (pair) { trackEvent( 'Form Editor', `Toggle ${pair.type || 'text'}`, pair.disabled ? 'Disable' : 'Enable' ); } _handleTrackChangeType (type) { trackEvent('Form Editor', 'Change Type', type); } _handleTrackChooseFile () { trackEvent('Form Editor', 'Choose File'); } _handleTrackCreate () { trackEvent('Form Editor', 'Create'); } _handleTrackDelete () { trackEvent('Form Editor', 'Delete'); } render () { const { parameters, onChange, handleRender, handleGetRenderContext, nunjucksPowerUserMode } = this.props; return ( <div className="scrollable-container tall wide"> <div className="scrollable"> <KeyValueEditor sortable allowFile allowMultiline namePlaceholder="name" valuePlaceholder="value" handleRender={handleRender} handleGetRenderContext={handleGetRenderContext} nunjucksPowerUserMode={nunjucksPowerUserMode} onToggleDisable={this._handleTrackToggle} onChangeType={this._handleTrackChangeType} onChooseFile={this._handleTrackChooseFile} onCreate={this._handleTrackCreate} onDelete={this._handleTrackDelete} onChange={onChange} pairs={parameters} /> </div> </div> ); } } FormEditor.propTypes = { // Required onChange: PropTypes.func.isRequired, parameters: PropTypes.arrayOf(PropTypes.object).isRequired, nunjucksPowerUserMode: PropTypes.bool.isRequired, // Optional handleRender: PropTypes.func, handleGetRenderContext: PropTypes.func }; export default FormEditor;
FormEditor
band_eu433.go
package band import "time" func newEU433Band(repeaterCompatible bool) (Band, error) { var maxPayloadSize []MaxPayloadSize if repeaterCompatible
else { maxPayloadSize = []MaxPayloadSize{ {M: 59, N: 51}, {M: 59, N: 51}, {M: 59, N: 51}, {M: 123, N: 115}, {M: 250, N: 242}, {M: 250, N: 242}, {M: 250, N: 242}, {M: 250, N: 242}, } } return Band{ DefaultTXPower: 10, ImplementsCFlist: true, RX2Frequency: 434665000, RX2DataRate: 0, MaxFCntGap: 16384, ADRACKLimit: 64, ADRACKDelay: 32, ReceiveDelay1: time.Second, ReceiveDelay2: time.Second * 2, JoinAcceptDelay1: time.Second * 5, JoinAcceptDelay2: time.Second * 6, ACKTimeoutMin: time.Second, ACKTimeoutMax: time.Second * 3, DataRates: []DataRate{ {Modulation: LoRaModulation, SpreadFactor: 12, Bandwidth: 125}, {Modulation: LoRaModulation, SpreadFactor: 11, Bandwidth: 125}, {Modulation: LoRaModulation, SpreadFactor: 10, Bandwidth: 125}, {Modulation: LoRaModulation, SpreadFactor: 9, Bandwidth: 125}, {Modulation: LoRaModulation, SpreadFactor: 8, Bandwidth: 125}, {Modulation: LoRaModulation, SpreadFactor: 7, Bandwidth: 125}, {Modulation: LoRaModulation, SpreadFactor: 7, Bandwidth: 250}, {Modulation: FSKModulation, BitRate: 50000}, }, MaxPayloadSize: maxPayloadSize, rx1DataRate: [][]int{ {0, 0, 0, 0, 0, 0}, {1, 0, 0, 0, 0, 0}, {2, 1, 0, 0, 0, 0}, {3, 2, 1, 0, 0, 0}, {4, 3, 2, 1, 0, 0}, {5, 4, 3, 2, 1, 0}, {6, 5, 4, 3, 2, 1}, {7, 6, 5, 4, 3, 2}, }, TXPowerOffset: []int{ 0, -2, -4, -6, -8, -10, }, UplinkChannels: []Channel{ {Frequency: 433175000, DataRates: []int{0, 1, 2, 3, 4, 5}}, {Frequency: 433375000, DataRates: []int{0, 1, 2, 3, 4, 5}}, {Frequency: 433575000, DataRates: []int{0, 1, 2, 3, 4, 5}}, }, DownlinkChannels: []Channel{ {Frequency: 433175000, DataRates: []int{0, 1, 2, 3, 4, 5}}, {Frequency: 433375000, DataRates: []int{0, 1, 2, 3, 4, 5}}, {Frequency: 433575000, DataRates: []int{0, 1, 2, 3, 4, 5}}, }, getRX1ChannelFunc: func(txChannel int) int { return txChannel }, getRX1FrequencyFunc: func(b *Band, txFrequency int) (int, error) { return txFrequency, nil }, }, nil }
{ maxPayloadSize = []MaxPayloadSize{ {M: 59, N: 51}, {M: 59, N: 51}, {M: 59, N: 51}, {M: 123, N: 115}, {M: 230, N: 222}, {M: 230, N: 222}, {M: 230, N: 222}, {M: 230, N: 222}, } }
test_000_client_core.py
import unittest from tests import TestClient from procountor.client import Client class TestClientClient(TestClient):
if __name__ == '__main__': unittest.main()
def __init__(self, *args, **kwargs): super(TestClientClient, self).__init__(*args, **kwargs) def test_0001_init(self): """ Testing that client has required params """ self.assertIsInstance(self.client, Client) self.assertIsNotNone(self.client.api_key) self.assertIsNotNone(self.client.client_id) self.assertIsNotNone(self.client.client_secret) self.assertIsNotNone(self.client.redirect_uri) self.assertEqual(self.client.test_mode, True) self.assertIsNotNone(self.client.access_token) def test_0004_headers(self): response = self.client._headers("GET", "users") self.assertEqual(response['authorization'], "Bearer {}".format(self.client.access_token))
5.await.js
// async function increment(number) { let promise = new Promise(function (resolve, reject) { setTimeout(function () { if (number) { return resolve(number++);
} return reject("Invalid number"); }, 2000); }); try { let result = await promise; console.log(result); } catch (err) { console.error(err); } } increment(5); increment();
mod.rs
//! Generated code for handling light client protobuf structs. use ff::PrimeField; use group::GroupEncoding; use std::convert::TryInto; use zcash_primitives::{ block::{BlockHash, BlockHeader}, consensus::BlockHeight, }; pub mod compact_formats; impl compact_formats::CompactBlock { /// Returns the [`BlockHash`] for this block. /// /// # Panics /// /// This function will panic if [`CompactBlock.header`] is not set and /// [`CompactBlock.hash`] is not exactly 32 bytes. /// /// [`CompactBlock.header`]: #structfield.header /// [`CompactBlock.hash`]: #structfield.hash pub fn hash(&self) -> BlockHash { if let Some(header) = self.header() { header.hash() } else { BlockHash::from_slice(&self.hash) } } /// Returns the [`BlockHash`] for this block's parent. /// /// # Panics /// /// This function will panic if [`CompactBlock.header`] is not set and /// [`CompactBlock.prevHash`] is not exactly 32 bytes. /// /// [`CompactBlock.header`]: #structfield.header /// [`CompactBlock.prevHash`]: #structfield.prevHash pub fn prev_hash(&self) -> BlockHash { if let Some(header) = self.header() { header.prev_block } else { BlockHash::from_slice(&self.prevHash) } } /// Returns the [`BlockHeader`] for this block if present. /// /// A convenience method that parses [`CompactBlock.header`] if present. /// /// [`CompactBlock.header`]: #structfield.header pub fn header(&self) -> Option<BlockHeader> { if self.header.is_empty() { None } else { BlockHeader::read(&self.header[..]).ok() } } /// Returns the [`BlockHeight`] for this block. /// /// A convenience method that wraps [`CompactBlock.height`] /// /// [`CompactBlock.height`]: #structfield.height pub fn height(&self) -> BlockHeight { BlockHeight::from(self.height) } } impl compact_formats::CompactOutput { /// Returns the note commitment for this output. /// /// A convenience method that parses [`CompactOutput.cmu`]. ///
pub fn cmu(&self) -> Result<bls12_381::Scalar, ()> { let mut repr = [0; 32]; repr.as_mut().copy_from_slice(&self.cmu[..]); bls12_381::Scalar::from_repr(repr).ok_or(()) } /// Returns the ephemeral public key for this output. /// /// A convenience method that parses [`CompactOutput.epk`]. /// /// [`CompactOutput.epk`]: #structfield.epk pub fn epk(&self) -> Result<jubjub::SubgroupPoint, ()> { let p = jubjub::SubgroupPoint::from_bytes(&self.epk[..].try_into().map_err(|_| ())?); if p.is_some().into() { Ok(p.unwrap()) } else { Err(()) } } }
/// [`CompactOutput.cmu`]: #structfield.cmu
span_ranking_srl_model.py
from typing import Dict from alnlp.modules.feedforward import FeedForward from alnlp.modules.time_distributed import TimeDistributed from .highway_variational_lstm import * import torch from alnlp.modules import util from ...parsers.biaffine.biaffine import Biaffine def initializer_1d(input_tensor, initializer): assert len(input_tensor.size()) == 1 input_tensor = input_tensor.view(-1, 1) input_tensor = initializer(input_tensor) return input_tensor.view(-1) class SpanRankingSRLDecoder(nn.Module): def __init__(self, context_layer_output_dim, label_space_size, config) -> None:
def reset_parameters(self): init.xavier_uniform_(self.span_width_embedding.weight) # init.xavier_uniform_(self.context_projective_layer.weight) # initializer_1d(self.context_projective_layer.bias, init.xavier_uniform_) for layer in self.arg_unary_score_layers: init.xavier_uniform_(layer.weight) initializer_1d(layer.bias, init.xavier_uniform_) init.xavier_uniform_(self.arg_unary_score_projection.weight) initializer_1d(self.arg_unary_score_projection.bias, init.xavier_uniform_) for layer in self.pred_unary_score_layers: init.xavier_uniform_(layer.weight) initializer_1d(layer.bias, init.xavier_uniform_) init.xavier_uniform_(self.pred_unary_score_projection.weight) initializer_1d(self.pred_unary_score_projection.bias, init.xavier_uniform_) for layer in self.srl_unary_score_layers: init.xavier_uniform_(layer.weight) initializer_1d(layer.bias, init.xavier_uniform_) init.xavier_uniform_(self.srl_unary_score_projection.weight) initializer_1d(self.srl_unary_score_projection.bias, init.xavier_uniform_) return None def forward(self, hidden_states, batch, mask=None): gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, masks, sent_lengths = SpanRankingSRLModel.unpack( batch, mask=mask, training=self.training) return self.decode(hidden_states, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels, gold_predicates) @staticmethod def get_candidate_spans(sent_lengths: torch.Tensor, max_sent_length, max_arg_width): num_sentences = len(sent_lengths) device = sent_lengths.device candidate_starts = torch.arange(0, max_sent_length, device=device).expand(num_sentences, max_arg_width, -1) candidate_width = torch.arange(0, max_arg_width, device=device).view(1, -1, 1) candidate_ends = candidate_starts + candidate_width candidate_starts = candidate_starts.contiguous().view(num_sentences, max_sent_length * max_arg_width) candidate_ends = candidate_ends.contiguous().view(num_sentences, max_sent_length * max_arg_width) actual_sent_lengths = sent_lengths.view(-1, 
1).expand(-1, max_sent_length * max_arg_width) candidate_mask = candidate_ends < actual_sent_lengths candidate_starts = candidate_starts * candidate_mask candidate_ends = candidate_ends * candidate_mask return candidate_starts, candidate_ends, candidate_mask @staticmethod def exclusive_cumsum(input: torch.Tensor, exclusive=True): """ Args: input: input is the sentence lengths tensor. exclusive: exclude the last sentence length (Default value = True) input(torch.Tensor :): input: torch.Tensor: Returns: """ assert exclusive is True if exclusive is True: exclusive_sent_lengths = input.new_zeros(1, dtype=torch.long) result = torch.cumsum(torch.cat([exclusive_sent_lengths, input], 0)[:-1], 0).view(-1, 1) else: result = torch.cumsum(input, 0).view(-1, 1) return result def flatten_emb(self, emb): num_sentences, max_sentence_length = emb.size()[0], emb.size()[1] assert len(emb.size()) == 3 flatted_emb = emb.contiguous().view(num_sentences * max_sentence_length, -1) return flatted_emb def flatten_emb_in_sentence(self, emb, batch_sentences_mask): num_sentences, max_sentence_length = emb.size()[0], emb.size()[1] flatted_emb = self.flatten_emb(emb) return flatted_emb[batch_sentences_mask.reshape(num_sentences * max_sentence_length)] def get_span_emb(self, flatted_context_emb, flatted_candidate_starts, flatted_candidate_ends, config, dropout=0.0): batch_word_num = flatted_context_emb.size()[0] # gather slices from embeddings according to indices span_start_emb = flatted_context_emb[flatted_candidate_starts] span_end_emb = flatted_context_emb[flatted_candidate_ends] span_emb_feature_list = [span_start_emb, span_end_emb] # store the span vector representations for span rep. 
span_width = 1 + flatted_candidate_ends - flatted_candidate_starts # [num_spans], generate the span width max_arg_width = config.max_arg_width # get the span width feature emb span_width_index = span_width - 1 span_width_emb = self.span_width_embedding(span_width_index) span_width_emb = F.dropout(span_width_emb, dropout, self.training) span_emb_feature_list.append(span_width_emb) """head features""" cpu_flatted_candidte_starts = flatted_candidate_starts span_indices = torch.arange(0, max_arg_width, device=flatted_context_emb.device).view(1, -1) + \ cpu_flatted_candidte_starts.view(-1, 1) # For all the i, where i in [begin, ..i, end] for span # reset the position index to the batch_word_num index with index - 1 span_indices = torch.clamp(span_indices, max=batch_word_num - 1) num_spans, spans_width = span_indices.size()[0], span_indices.size()[1] flatted_span_indices = span_indices.view(-1) # so Huge!!!, column is the span? # if torch.cuda.is_available(): flatted_span_indices = flatted_span_indices span_text_emb = flatted_context_emb.index_select(0, flatted_span_indices).view(num_spans, spans_width, -1) span_indices_mask = util.lengths_to_mask(span_width, max_len=max_arg_width) # project context output to num head # head_scores = self.context_projective_layer.forward(flatted_context_emb) # get span attention # span_attention = head_scores.index_select(0, flatted_span_indices).view(num_spans, spans_width) # span_attention = torch.add(span_attention, expanded_span_indices_log_mask).unsqueeze(2) # control the span len # span_attention = F.softmax(span_attention, dim=1) span_text_emb = span_text_emb * span_indices_mask.unsqueeze(2).expand(-1, -1, span_text_emb.size()[-1]) span_head_emb = torch.mean(span_text_emb, 1) span_emb_feature_list.append(span_head_emb) span_emb = torch.cat(span_emb_feature_list, 1) return span_emb, None, span_text_emb, span_indices, span_indices_mask def get_arg_unary_scores(self, span_emb): """Compute span score with FFNN(span embedding) Args: 
span_emb: tensor of [num_sentences, num_spans, emb_size] config: param dropout: num_labels: param name: Returns: """ input = span_emb for i, ffnn in enumerate(self.arg_unary_score_layers): input = F.relu(ffnn.forward(input)) input = self.arg_dropout_layers[i].forward(input) output = self.arg_unary_score_projection.forward(input) return output def get_pred_unary_scores(self, span_emb): input = span_emb for i, ffnn in enumerate(self.pred_unary_score_layers): input = F.relu(ffnn.forward(input)) input = self.pred_dropout_layers[i].forward(input) output = self.pred_unary_score_projection.forward(input) return output def extract_spans(self, candidate_scores, candidate_starts, candidate_ends, topk, max_sentence_length, sort_spans, enforce_non_crossing): """extract the topk span indices Args: candidate_scores: param candidate_starts: candidate_ends: param topk: [num_sentences] max_sentence_length: param sort_spans: enforce_non_crossing: return: indices [num_sentences, max_num_predictions] candidate_starts: topk: sort_spans: Returns: """ # num_sentences = candidate_scores.size()[0] # num_input_spans = candidate_scores.size()[1] max_num_output_spans = int(torch.max(topk)) indices = [score.topk(k)[1] for score, k in zip(candidate_scores, topk)] output_span_indices_tensor = [F.pad(item, [0, max_num_output_spans - item.size()[0]], value=item[-1]) for item in indices] output_span_indices_tensor = torch.stack(output_span_indices_tensor) return output_span_indices_tensor def batch_index_select(self, emb, indices): num_sentences = emb.size()[0] max_sent_length = emb.size()[1] flatten_emb = self.flatten_emb(emb) offset = (torch.arange(0, num_sentences, device=emb.device) * max_sent_length).unsqueeze(1) return torch.index_select(flatten_emb, 0, (indices + offset).view(-1)) \ .view(indices.size()[0], indices.size()[1], -1) def get_batch_topk(self, candidate_starts: torch.Tensor, candidate_ends, candidate_scores, topk_ratio, text_len, max_sentence_length, sort_spans=False, 
enforce_non_crossing=True): num_sentences = candidate_starts.size()[0] max_sentence_length = candidate_starts.size()[1] topk = torch.floor(text_len.to(torch.float) * topk_ratio).to(torch.long) topk = torch.max(topk, torch.ones(num_sentences, device=candidate_starts.device, dtype=torch.long)) # this part should be implemented with C++ predicted_indices = self.extract_spans(candidate_scores, candidate_starts, candidate_ends, topk, max_sentence_length, sort_spans, enforce_non_crossing) predicted_starts = torch.gather(candidate_starts, 1, predicted_indices) predicted_ends = torch.gather(candidate_ends, 1, predicted_indices) predicted_scores = torch.gather(candidate_scores, 1, predicted_indices) return predicted_starts, predicted_ends, predicted_scores, topk, predicted_indices def get_dense_span_labels(self, span_starts, span_ends, span_labels, max_sentence_length, span_parents=None): num_sentences = span_starts.size()[0] max_spans_num = span_starts.size()[1] # span_starts = span_starts + 1 - (span_labels > 0).to(torch.long) span_starts[(span_labels == 0) & (span_starts < max_sentence_length - 1)] += 1 # make start > end sentence_indices = torch.arange(0, num_sentences, device=span_starts.device).unsqueeze(1).expand(-1, max_spans_num) sparse_indices = torch.cat([sentence_indices.unsqueeze(2), span_starts.unsqueeze(2), span_ends.unsqueeze(2)], dim=2) if span_parents is not None: # semantic span predicate offset sparse_indices = torch.cat([sparse_indices, span_parents.unsqueeze(2)], 2) rank = 3 if span_parents is None else 4 dense_labels = torch.sparse.LongTensor(sparse_indices.view(num_sentences * max_spans_num, rank).t(), span_labels.view(-1), torch.Size([num_sentences] + [max_sentence_length] * (rank - 1))) \ .to_dense() return dense_labels @staticmethod def gather_4d(params, indices): assert len(params.size()) == 4 and len(indices) == 4 indices_a, indices_b, indices_c, indices_d = indices result = params[indices_a, indices_b, indices_c, indices_d] return result def 
get_srl_labels(self, arg_starts, arg_ends, predicates, gold_predicates, gold_arg_starts, gold_arg_ends, gold_arg_labels, max_sentence_length ): num_sentences = arg_starts.size()[0] max_arg_num = arg_starts.size()[1] max_pred_num = predicates.size()[1] sentence_indices_2d = torch.arange(0, num_sentences, device=arg_starts.device).unsqueeze(1).unsqueeze(2).expand( -1, max_arg_num, max_pred_num) expanded_arg_starts = arg_starts.unsqueeze(2).expand(-1, -1, max_pred_num) expanded_arg_ends = arg_ends.unsqueeze(2).expand(-1, -1, max_pred_num) expanded_predicates = predicates.unsqueeze(1).expand(-1, max_arg_num, -1) dense_srl_labels = self.get_dense_span_labels(gold_arg_starts, gold_arg_ends, gold_arg_labels, max_sentence_length, span_parents=gold_predicates) # ans srl_labels = self.gather_4d(dense_srl_labels, [sentence_indices_2d, expanded_arg_starts, expanded_arg_ends, expanded_predicates]) return srl_labels def get_srl_unary_scores(self, span_emb): input = span_emb for i, ffnn in enumerate(self.srl_unary_score_layers): input = F.relu(ffnn.forward(input)) input = self.srl_dropout_layers[i].forward(input) output = self.srl_unary_score_projection.forward(input) return output def get_srl_scores(self, arg_emb, pred_emb, arg_scores, pred_scores, num_labels, config, dropout): num_sentences = arg_emb.size()[0] num_args = arg_emb.size()[1] # [batch_size, max_arg_num, arg_emb_size] num_preds = pred_emb.size()[1] # [batch_size, max_pred_num, pred_emb_size] unsqueezed_arg_emb = arg_emb.unsqueeze(2) unsqueezed_pred_emb = pred_emb.unsqueeze(1) expanded_arg_emb = unsqueezed_arg_emb.expand(-1, -1, num_preds, -1) expanded_pred_emb = unsqueezed_pred_emb.expand(-1, num_args, -1, -1) pair_emb_list = [expanded_arg_emb, expanded_pred_emb] pair_emb = torch.cat(pair_emb_list, 3) # concatenate the argument emb and pre emb pair_emb_size = pair_emb.size()[3] flat_pair_emb = pair_emb.view(num_sentences * num_args * num_preds, pair_emb_size) # get unary scores flat_srl_scores = 
self.get_srl_unary_scores(flat_pair_emb) srl_scores = flat_srl_scores.view(num_sentences, num_args, num_preds, -1) if self.config.use_biaffine: srl_scores += self.biaffine(arg_emb, self.predicate_scale(pred_emb)).permute([0, 2, 3, 1]) unsqueezed_arg_scores, unsqueezed_pred_scores = \ arg_scores.unsqueeze(2).unsqueeze(3), pred_scores.unsqueeze(1).unsqueeze(3) srl_scores = srl_scores + unsqueezed_arg_scores + unsqueezed_pred_scores dummy_scores = torch.zeros([num_sentences, num_args, num_preds, 1], device=arg_emb.device) srl_scores = torch.cat([dummy_scores, srl_scores], 3) return srl_scores def get_srl_softmax_loss(self, srl_scores, srl_labels, num_predicted_args, num_predicted_preds): srl_loss_mask = self.get_srl_loss_mask(srl_scores, num_predicted_args, num_predicted_preds) loss = torch.nn.functional.cross_entropy(srl_scores[srl_loss_mask], srl_labels[srl_loss_mask], reduction=self.loss_reduction) return loss, srl_loss_mask def get_srl_loss_mask(self, srl_scores, num_predicted_args, num_predicted_preds): max_num_arg = srl_scores.size()[1] max_num_pred = srl_scores.size()[2] # num_predicted_args, 1D tensor; max_num_arg: a int variable means the gold ans's max arg number args_mask = util.lengths_to_mask(num_predicted_args, max_num_arg) pred_mask = util.lengths_to_mask(num_predicted_preds, max_num_pred) srl_loss_mask = args_mask.unsqueeze(2) & pred_mask.unsqueeze(1) return srl_loss_mask def decode(self, contextualized_embeddings, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels, gold_predicates): num_sentences, max_sent_length = masks.size() device = sent_lengths.device """generate candidate spans with argument pruning""" # candidate_starts [num_sentences, max_sent_length * max_arg_width] candidate_starts, candidate_ends, candidate_mask = self.get_candidate_spans( sent_lengths, max_sent_length, self.config.max_arg_width) flatted_candidate_mask = candidate_mask.view(-1) batch_word_offset = self.exclusive_cumsum(sent_lengths) # get the word offset 
in a batch # choose the flatted_candidate_starts with the actual existing positions, i.e. exclude the illegal starts flatted_candidate_starts = candidate_starts + batch_word_offset flatted_candidate_starts = flatted_candidate_starts.view(-1)[flatted_candidate_mask].to(torch.long) flatted_candidate_ends = candidate_ends + batch_word_offset flatted_candidate_ends = flatted_candidate_ends.view(-1)[flatted_candidate_mask].to(torch.long) # flatten the lstm output according to the sentence mask, i.e. exclude the illegal (padding) lstm output flatted_context_output = self.flatten_emb_in_sentence(contextualized_embeddings, masks) """generate the span embedding""" candidate_span_emb, head_scores, span_head_emb, head_indices, head_indices_log_mask = self.get_span_emb( flatted_context_output, flatted_candidate_starts, flatted_candidate_ends, self.config, dropout=self.dropout) """Get the span ids""" candidate_span_number = candidate_span_emb.size()[0] max_candidate_spans_num_per_sentence = candidate_mask.size()[1] sparse_indices = candidate_mask.nonzero(as_tuple=False) sparse_values = torch.arange(0, candidate_span_number, device=device) candidate_span_ids = torch.sparse.FloatTensor(sparse_indices.t(), sparse_values, torch.Size([num_sentences, max_candidate_spans_num_per_sentence])).to_dense() spans_log_mask = torch.log(candidate_mask.to(torch.float)) predict_dict = {"candidate_starts": candidate_starts, "candidate_ends": candidate_ends, 'candidate_arg_mask': candidate_mask, "head_scores": head_scores} """Get unary scores and topk of candidate argument spans.""" flatted_candidate_arg_scores = self.get_arg_unary_scores(candidate_span_emb) candidate_arg_scores = flatted_candidate_arg_scores.index_select(0, candidate_span_ids.view(-1)) \ .view(candidate_span_ids.size()[0], candidate_span_ids.size()[1]) candidate_arg_scores = candidate_arg_scores + spans_log_mask arg_starts, arg_ends, arg_scores, num_args, top_arg_indices = \ self.get_batch_topk(candidate_starts, candidate_ends, 
candidate_arg_scores, self.config.argument_ratio, sent_lengths, max_sent_length, sort_spans=False, enforce_non_crossing=False) """Get the candidate predicate""" candidate_pred_ids = torch.arange(0, max_sent_length, device=device).unsqueeze(0).expand(num_sentences, -1) candidate_pred_emb = contextualized_embeddings candidate_pred_scores = self.get_pred_unary_scores(candidate_pred_emb) candidate_pred_scores = candidate_pred_scores + torch.log(masks.to(torch.float).unsqueeze(2)) candidate_pred_scores = candidate_pred_scores.squeeze(2) if self.use_gold_predicates is True: predicates = gold_predicates[0] num_preds = gold_predicates[1] pred_scores = torch.zeros_like(predicates) top_pred_indices = predicates else: predicates, _, pred_scores, num_preds, top_pred_indices = self.get_batch_topk( candidate_pred_ids, candidate_pred_ids, candidate_pred_scores, self.config.predicate_ratio, sent_lengths, max_sent_length, sort_spans=False, enforce_non_crossing=False) """Get top arg embeddings""" arg_span_indices = torch.gather(candidate_span_ids, 1, top_arg_indices) # [num_sentences, max_num_args] arg_emb = candidate_span_emb.index_select(0, arg_span_indices.view(-1)).view( arg_span_indices.size()[0], arg_span_indices.size()[1], -1 ) # [num_sentences, max_num_args, emb] """Get top predicate embeddings""" pred_emb = self.batch_index_select(candidate_pred_emb, top_pred_indices) # [num_sentences, max_num_preds, emb] """Get the srl scores according to the arg emb and pre emb.""" srl_scores = self.get_srl_scores(arg_emb, pred_emb, arg_scores, pred_scores, self.label_space_size, self.config, self.dropout) # [num_sentences, max_num_args, max_num_preds, num_labels] if gold_arg_labels is not None: """Get the answers according to the labels""" srl_labels = self.get_srl_labels(arg_starts, arg_ends, predicates, gold_predicates, gold_arg_starts, gold_arg_ends, gold_arg_labels, max_sent_length) """Compute the srl loss""" srl_loss, srl_mask = self.get_srl_softmax_loss(srl_scores, srl_labels, 
num_args, num_preds) predict_dict.update({ 'srl_mask': srl_mask, 'loss': srl_loss }) else: predict_dict['srl_mask'] = self.get_srl_loss_mask(srl_scores, num_args, num_preds) predict_dict.update({ "candidate_arg_scores": candidate_arg_scores, "candidate_pred_scores": candidate_pred_scores, "predicates": predicates, "arg_starts": arg_starts, "arg_ends": arg_ends, "arg_scores": arg_scores, "pred_scores": pred_scores, "num_args": num_args, "num_preds": num_preds, "arg_labels": torch.max(srl_scores, 1)[1], # [num_sentences, num_args, num_preds] "srl_scores": srl_scores, }) return predict_dict class SpanRankingSRLModel(nn.Module): def __init__(self, config, embed: torch.nn.Module, context_layer: torch.nn.Module, label_space_size): super(SpanRankingSRLModel, self).__init__() self.config = config self.dropout = float(config.dropout) self.lexical_dropout = float(self.config.lexical_dropout) self.label_space_size = label_space_size # Initialize layers and parameters self.word_embedding_dim = embed.get_output_dim() # get the embedding dim self.embed = embed # Initialize context layer self.context_layer = context_layer context_layer_output_dim = context_layer.get_output_dim() self.decoder = SpanRankingSRLDecoder(context_layer_output_dim, label_space_size, config) def forward(self, batch: Dict[str, torch.Tensor] ): gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, masks, sent_lengths = \ self.unpack(batch, training=self.training) context_embeddings = self.embed(batch) context_embeddings = F.dropout(context_embeddings, self.lexical_dropout, self.training) contextualized_embeddings = self.context_layer(context_embeddings, masks) return self.decoder.decode(contextualized_embeddings, sent_lengths, masks, gold_arg_starts, gold_arg_ends, gold_arg_labels, gold_predicates) @staticmethod def unpack(batch, mask=None, training=False): keys = 'token_length', 'predicate_offset', 'argument_begin_offset', 'argument_end_offset', 'srl_label_id' sent_lengths, gold_predicates, 
gold_arg_starts, gold_arg_ends, gold_arg_labels = [batch.get(k, None) for k in keys] if mask is None: mask = util.lengths_to_mask(sent_lengths) # elif not training: # sent_lengths = mask.sum(dim=1) return gold_arg_ends, gold_arg_labels, gold_arg_starts, gold_predicates, mask, sent_lengths
super().__init__() self.config = config self.label_space_size = label_space_size self.dropout = float(config.dropout) self.use_gold_predicates = config.use_gold_predicates # span width feature embedding self.span_width_embedding = nn.Embedding(self.config.max_arg_width, self.config.span_width_feature_size) # self.context_projective_layer = nn.Linear(2 * self.lstm_hidden_size, self.config.num_attention_heads) # span scores self.span_emb_size = 3 * context_layer_output_dim + self.config.span_width_feature_size self.arg_unary_score_layers = nn.ModuleList([nn.Linear(self.span_emb_size, self.config.ffnn_size) if i == 0 else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i in range(self.config.ffnn_depth)]) # [,150] self.arg_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)]) self.arg_unary_score_projection = nn.Linear(self.config.ffnn_size, 1) # predicate scores self.pred_unary_score_layers = nn.ModuleList( [nn.Linear(context_layer_output_dim, self.config.ffnn_size) if i == 0 else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i in range(self.config.ffnn_depth)]) # [,150] self.pred_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)]) self.pred_unary_score_projection = nn.Linear(self.config.ffnn_size, 1) # srl scores self.srl_unary_score_input_size = self.span_emb_size + context_layer_output_dim self.srl_unary_score_layers = nn.ModuleList([nn.Linear(self.srl_unary_score_input_size, self.config.ffnn_size) if i == 0 else nn.Linear(self.config.ffnn_size, self.config.ffnn_size) for i in range(self.config.ffnn_depth)]) self.srl_dropout_layers = nn.ModuleList([nn.Dropout(self.dropout) for _ in range(self.config.ffnn_depth)]) self.srl_unary_score_projection = nn.Linear(self.config.ffnn_size, self.label_space_size - 1) if config.use_biaffine: self.predicate_scale = TimeDistributed(FeedForward(context_layer_output_dim, 1, self.span_emb_size, 'ReLU')) self.biaffine 
= Biaffine(self.span_emb_size, self.label_space_size - 1) self.loss_reduction = config.loss_reduction self.reset_parameters()
tdfReader.js
/* * The MIT License (MIT) * * Copyright (c) 2016 University of California San Diego * Author: Jim Robinson * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ import BinaryParser from "../binary.js"; import {igvxhr, BGZip} from "../../node_modules/igv-utils/src/index.js"; import {buildOptions} from "../util/igvUtils.js"; const GZIP_FLAG = 0x1; class TDFReader { constructor(config, genome) { this.config = config; this.genome = genome; this.path = config.url; this.groupCache = {}; this.datasetCache = {}; } async readHeader() { if (this.magic !== undefined) { return this; // Already read } let data = await igvxhr.loadArrayBuffer(this.path, buildOptions(this.config, {range: {start: 0, size: 64000}})) let binaryParser = new BinaryParser(new DataView(data)); this.magic = binaryParser.getInt(); this.version = binaryParser.getInt(); this.indexPos = binaryParser.getLong(); this.indexSize = binaryParser.getInt(); const headerSize = binaryParser.getInt(); if (this.version >= 2) { let nWindowFunctions = binaryParser.getInt(); this.windowFunctions = []; while (nWindowFunctions-- > 0) { this.windowFunctions.push(binaryParser.getString()); } } this.trackType = binaryParser.getString(); this.trackLine = binaryParser.getString(); let nTracks = binaryParser.getInt(); this.trackNames = []; while (nTracks-- > 0) { this.trackNames.push(binaryParser.getString()); } this.genomeID = binaryParser.getString(); this.flags = binaryParser.getInt(); this.compressed = (this.flags & GZIP_FLAG) !== 0; // Now read index data = await igvxhr.loadArrayBuffer(this.path, buildOptions(this.config, { range: { start: this.indexPos, size: this.indexSize } })) binaryParser = new BinaryParser(new DataView(data)); this.datasetIndex = {}; let nEntries = binaryParser.getInt(); while (nEntries-- > 0) { const name = binaryParser.getString(); const pos = binaryParser.getLong(); const size = binaryParser.getInt(); this.datasetIndex[name] = {position: pos, size: size}; } this.groupIndex = {}; nEntries = binaryParser.getInt(); while (nEntries-- > 0) { const name = binaryParser.getString(); const pos = binaryParser.getLong(); const size = binaryParser.getInt(); 
this.groupIndex[name] = {position: pos, size: size}; } return this; } async readDataset(chr, windowFunction, zoom) { const key = chr + "_" + windowFunction + "_" + zoom; if (this.datasetCache[key]) { return this.datasetCache[key]; } else { await this.readHeader() const wf = (this.version < 2) ? "" : "/" + windowFunction; const zoomString = (chr.toLowerCase() === "all" || zoom === undefined) ? "0" : zoom.toString(); let dsName; if (windowFunction === "raw") { dsName = "/" + chr + "/raw"; } else { dsName = "/" + chr + "/z" + zoomString + wf; } const indexEntry = this.datasetIndex[dsName]; if (indexEntry === undefined) { return undefined; } const data = await igvxhr.loadArrayBuffer(this.path, buildOptions(this.config, { range: { start: indexEntry.position, size: indexEntry.size } })); if (!data) { return undefined; } const binaryParser = new BinaryParser(new DataView(data)); let nAttributes = binaryParser.getInt(); const attributes = {}; while (nAttributes-- > 0) { attributes[binaryParser.getString()] = binaryParser.getString(); } const dataType = binaryParser.getString(); const tileWidth = binaryParser.getFloat(); let nTiles = binaryParser.getInt(); const tiles = []; while (nTiles-- > 0) { tiles.push({position: binaryParser.getLong(), size: binaryParser.getInt()}); } const dataset = { name: dsName, attributes: attributes, dataType: dataType, tileWidth: tileWidth, tiles: tiles } this.datasetCache[key] = dataset; return dataset; } } async readRootGroup() { const genome = this.genome; const rootGroup = this.groupCache["/"]; if (rootGroup) { return rootGroup; } else { const group = await this.readGroup("/"); const names = group["chromosomes"]; const maxZoomString = group["maxZoom"]; // Now parse out interesting attributes. 
if (maxZoomString) { this.maxZoom = Number(maxZoomString); } const totalCountString = group["totalCount"]; if(totalCountString) { group.totalCount = Number.parseFloat(totalCountString); } // Chromosome names const chrAliasTable = {}; if (names) { names.split(",").forEach(function (chr) { const canonicalName = genome.getChromosomeName(chr); chrAliasTable[canonicalName] = chr; }) } this.chrAliasTable = chrAliasTable; this.groupCache["/"] = group; return group; } } async readGroup(name) { const group = this.groupCache[name]; if (group) { return group; } else { await this.readHeader() const indexEntry = this.groupIndex[name]; if (indexEntry === undefined) { return undefined; } const data = await igvxhr.loadArrayBuffer(this.path, buildOptions(this.config, { range: { start: indexEntry.position, size: indexEntry.size } })) if (!data) { return undefined; } const binaryParser = new BinaryParser(new DataView(data)); const group = {name: name}; let nAttributes = binaryParser.getInt(); while (nAttributes-- > 0) { const key = binaryParser.getString(); const value = binaryParser.getString(); group[key] = value; } this.groupCache[name] = group; return group; } } async readTiles(tileIndeces, nTracks) { tileIndeces.sort(function (a, b) { return a.position - b.position; }) tileIndeces = tileIndeces.filter(function (idx) { return idx.size > 0; }); if (tileIndeces.length === 0) { return Promise.resolve([]); } const firstEntry = tileIndeces[0]; const lastEntry = tileIndeces[tileIndeces.length - 1]; const position = firstEntry.position; const size = (lastEntry.position + lastEntry.size) - position; const data = await igvxhr.loadArrayBuffer(this.path, buildOptions(this.config, { range: { start: position, size: size } })) const tiles = []; // Loop through and decode tiles for (let indexEntry of tileIndeces) { const start = indexEntry.position - position; const size = indexEntry.size; if (size > 0) { let tileData; if (this.compressed) { const plain = BGZip.inflate(data.slice(start, start + 
size)); tileData = plain.buffer; } else { tileData = data.slice(start, start + size); } const binaryParser = new BinaryParser(new DataView(tileData)); const type = binaryParser.getString(); let tile; switch (type) { case "fixedStep": tile = createFixedStep(binaryParser, nTracks); break; case "variableStep": tile = createVariableStep(binaryParser, nTracks); break; case "bed": case "bedWithName": tile = createBed(binaryParser, nTracks, type); break; default: throw "Unknown tile type: " + type; } tiles.push(tile); } } return tiles; } async readTile(indexEntry, nTracks) { let data = await igvxhr.loadArrayBuffer(this.path, buildOptions(this.config, { range: { start: indexEntry.position, size: indexEntry.size } })) if (this.compressed) { const plain = BGZip.inflate(data); data = plain.buffer; } const binaryParser = new BinaryParser(new DataView(data)); const type = binaryParser.getString(); switch (type) { case "fixedStep": return createFixedStep(binaryParser, nTracks); case "variableStep": return createVariableStep(binaryParser, nTracks); case "bed": case "bedWithName": return createBed(binaryParser, nTracks, type); default: throw "Unknown tile type: " + type; } } } function createFixedStep(binaryParser, nTracks) { const nPositions = binaryParser.getInt(); const start = binaryParser.getInt(); const span = binaryParser.getFloat(); const data = []; let nt = nTracks; while (nt-- > 0) { let np = nPositions; const dtrack = []; while (np-- > 0) { dtrack.push(binaryParser.getFloat()); } data.push(dtrack); } return { type: "fixedStep", start: start, span: span, data: data, nTracks: nTracks, nPositions: nPositions } } function
(binaryParser, nTracks) { const tileStart = binaryParser.getInt(); const span = binaryParser.getFloat(); const nPositions = binaryParser.getInt(); const start = []; let np = nPositions; while (np-- > 0) { start.push(binaryParser.getInt()); } const nS = binaryParser.getInt(); // # of samples, ignored but should === nTracks const data = []; let nt = nTracks; while (nt-- > 0) { np = nPositions; const dtrack = []; while (np-- > 0) { dtrack.push(binaryParser.getFloat()); } data.push(dtrack); } return { type: "variableStep", tileStart: tileStart, span: span, start: start, data: data, nTracks: nTracks, nPositions: nPositions } } function createBed(binaryParser, nTracks, type) { const nPositions = binaryParser.getInt(); let n = nPositions; const start = []; while (n-- > 0) { start.push(binaryParser.getInt()); } n = nPositions; const end = []; while (n-- > 0) { end.push(binaryParser.getInt()); } const nS = binaryParser.getInt(); // # of samples, ignored but should === nTracks const data = []; let nt = nTracks; while (nt-- > 0) { let np = nPositions; const dtrack = []; while (np-- > 0) { dtrack.push(binaryParser.getFloat()); } data.push(dtrack); } if (type === "bedWithName") { n = nPositions; const name = []; while (n-- > 0) { name.push(binaryParser.getString()); } } return { type: type, start: start, end: end, data: data, nTracks: nTracks, nPositions: nPositions } } export default TDFReader;
createVariableStep
anonymized_dns.rs
use crate::errors::*; use crate::*; use byteorder::{BigEndian, ByteOrder}; use ipext::IpExt; use siphasher::sip128::Hasher128; use std::hash::Hasher; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; use std::sync::Arc; use tokio::net::UdpSocket; pub const ANONYMIZED_DNSCRYPT_QUERY_MAGIC: [u8; 10] = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00]; pub const ANONYMIZED_DNSCRYPT_OVERHEAD: usize = 16 + 2; pub const RELAYED_CERT_CACHE_SIZE: usize = 1000; pub const RELAYED_CERT_CACHE_TTL: u32 = 600; pub async fn handle_anonymized_dns( globals: Arc<Globals>, client_ctx: ClientCtx, relayed_packet: &[u8], ) -> Result<(), Error> { ensure!( relayed_packet.len() > ANONYMIZED_DNSCRYPT_OVERHEAD, "Short packet" ); let ip_bin = &relayed_packet[..16]; let ip_v6 = Ipv6Addr::new( BigEndian::read_u16(&ip_bin[0..2]), BigEndian::read_u16(&ip_bin[2..4]), BigEndian::read_u16(&ip_bin[4..6]), BigEndian::read_u16(&ip_bin[6..8]), BigEndian::read_u16(&ip_bin[8..10]), BigEndian::read_u16(&ip_bin[10..12]), BigEndian::read_u16(&ip_bin[12..14]), BigEndian::read_u16(&ip_bin[14..16]), ); let ip = match ip_v6.to_ipv4() { Some(ip_v4) => IpAddr::V4(ip_v4), None => IpAddr::V6(ip_v6), }; #[cfg(feature = "metrics")] globals.varz.anonymized_queries.inc(); ensure!(IpExt::is_global(&ip), "Forbidden upstream address"); ensure!( !globals.anonymized_dns_blacklisted_ips.contains(&ip), "Blacklisted upstream IP" ); let port = BigEndian::read_u16(&relayed_packet[16..18]); ensure!( (globals.anonymized_dns_allow_non_reserved_ports && port >= 1024) || globals.anonymized_dns_allowed_ports.contains(&port), "Forbidden upstream port" ); let upstream_address = SocketAddr::new(ip, port); ensure!( !globals.listen_addrs.contains(&upstream_address) && globals.external_addr != Some(upstream_address), "Would be relaying to self" ); let encrypted_packet = &relayed_packet[ANONYMIZED_DNSCRYPT_OVERHEAD..]; let encrypted_packet_len = encrypted_packet.len(); ensure!( encrypted_packet_len 
>= ANONYMIZED_DNSCRYPT_QUERY_MAGIC.len() + DNS_HEADER_SIZE && encrypted_packet_len <= DNSCRYPT_UDP_QUERY_MAX_SIZE, "Unexpected encapsulated query length" ); ensure!( encrypted_packet_len > 8 && [0u8, 0, 0, 0, 0, 0, 0, 1] != encrypted_packet[..8], "Protocol confusion with QUIC" ); debug_assert!(DNSCRYPT_UDP_QUERY_MIN_SIZE > ANONYMIZED_DNSCRYPT_QUERY_MAGIC.len()); ensure!( encrypted_packet[..ANONYMIZED_DNSCRYPT_QUERY_MAGIC.len()] != ANONYMIZED_DNSCRYPT_QUERY_MAGIC, "Loop detected" ); let ext_socket = match globals.external_addr { Some(x) => UdpSocket::bind(x).await?, None => match upstream_address { SocketAddr::V4(_) => { UdpSocket::bind(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0))).await? } SocketAddr::V6(s) => { UdpSocket::bind(SocketAddr::V6(SocketAddrV6::new( Ipv6Addr::UNSPECIFIED, 0, s.flowinfo(), s.scope_id(), ))) .await? } }, }; ext_socket.connect(&upstream_address).await?; ext_socket.send(encrypted_packet).await?; let mut response = vec![0u8; DNSCRYPT_UDP_RESPONSE_MAX_SIZE]; let (response_len, is_certificate_response) = loop { let fut = ext_socket.recv_from(&mut response[..]); let (response_len, response_addr) = fut.await?; if response_addr != upstream_address { continue; } if is_encrypted_response(&response, response_len) { break (response_len, false); } if is_certificate_response(&response, encrypted_packet) { break (response_len, true); } }; response.truncate(response_len); if is_certificate_response { let mut hasher = globals.hasher; hasher.write(&relayed_packet[..ANONYMIZED_DNSCRYPT_OVERHEAD]); hasher.write(&dns::qname(encrypted_packet)?); let packet_hash = hasher.finish128().as_u128(); let cached_response = { match globals.cert_cache.lock().get(&packet_hash) { None => None, Some(response) if !(*response).has_expired() => { trace!("Relayed certificate cached"); let mut cached_response = (*response).clone(); cached_response.set_tid(dns::tid(encrypted_packet)); Some(cached_response.into_response()) } Some(_) => { trace!("Relayed certificate 
expired"); None } } }; match cached_response { None => { globals.cert_cache.lock().insert( packet_hash, CachedResponse::new(&globals.cert_cache, response.clone()), ); } Some(cached_response) => response = cached_response, } } #[cfg(feature = "metrics")] globals.varz.anonymized_responses.inc(); respond_to_query(client_ctx, response).await } #[inline] fn
(response: &[u8], response_len: usize) -> bool { (DNSCRYPT_UDP_RESPONSE_MIN_SIZE..=DNSCRYPT_UDP_RESPONSE_MAX_SIZE).contains(&response_len) && response[..DNSCRYPT_RESPONSE_MAGIC_SIZE] == DNSCRYPT_RESPONSE_MAGIC } fn is_certificate_response(response: &[u8], query: &[u8]) -> bool { let prefix = b"2.dnscrypt-cert."; if !((DNS_HEADER_SIZE + prefix.len() + 4..=DNS_MAX_PACKET_SIZE).contains(&query.len()) && (DNS_HEADER_SIZE + prefix.len() + 4..=DNS_MAX_PACKET_SIZE).contains(&response.len()) && dns::tid(response) == dns::tid(query) && dns::is_response(response) && !dns::is_response(query)) { debug!("Unexpected relayed cert response"); return false; } let qname = match (dns::qname(query), dns::qname(response)) { (Ok(response_qname), Ok(query_qname)) if response_qname == query_qname => query_qname, _ => { debug!("Relayed cert qname response didn't match the query qname"); return false; } }; if qname.len() <= prefix.len() || &qname[..prefix.len()] != prefix { debug!("Relayed cert qname response didn't start with the standard prefix"); return false; } true }
is_encrypted_response
generic-widget.rs
/* * Copyright (c) 2017 Boucher, Antoni <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern crate gtk; #[macro_use] extern crate relm; #[macro_use] extern crate relm_derive; use std::fmt::Display; use gtk::{ Button, ButtonExt, ContainerExt, Inhibit, Label, LabelExt, WidgetExt, Window, WindowType, }; use gtk::Orientation::{Horizontal, Vertical}; use relm::{Component, ContainerWidget, Relm, Update, Widget}; use self::CounterMsg::*; use self::Msg::*; trait IncDec { fn dec(&mut self); fn identity() -> Self; fn inc(&mut self); } impl IncDec for i32 { fn dec(&mut self) { *self -= 1; } fn identity() -> Self { 1 } fn inc(&mut self) { *self += 1; } } struct Model<T> { counter: T, } #[derive(Msg)] enum CounterMsg<T> { Decrement, Increment(T), } struct Counter<T> { counter_label: Label, model: Model<T>, vbox: gtk::Box, }
type Model = Model<T>; type ModelParam = T; type Msg = CounterMsg<T>; fn model(_: &Relm<Self>, value: T) -> Self::Model { Model { counter: value, } } fn update(&mut self, event: CounterMsg<T>) { let label = &self.counter_label; match event { Decrement => { self.model.counter.dec(); label.set_text(&self.model.counter.to_string()); }, Increment(_) => { self.model.counter.inc(); label.set_text(&self.model.counter.to_string()); }, } } } impl<T: Clone + IncDec + Display + 'static> Widget for Counter<T> { type Root = gtk::Box; fn root(&self) -> Self::Root { self.vbox.clone() } fn view(relm: &Relm<Self>, model: Self::Model) -> Self { let vbox = gtk::Box::new(Vertical, 0); let plus_button = Button::new_with_label("+"); vbox.add(&plus_button); let counter_label = Label::new(Some(model.counter.to_string().as_ref())); vbox.add(&counter_label); let minus_button = Button::new_with_label("-"); vbox.add(&minus_button); connect!(relm, plus_button, connect_clicked(_), Increment(T::identity())); connect!(relm, minus_button, connect_clicked(_), Decrement); Counter { counter_label: counter_label, model, vbox: vbox, } } } #[derive(Msg)] enum Msg { Quit, } struct Win { _counter1: Component<Counter<i32>>, _counter2: Component<Counter<i32>>, window: Window, } impl Update for Win { type Model = (); type ModelParam = (); type Msg = Msg; fn model(_: &Relm<Self>, _: ()) -> () { () } fn update(&mut self, event: Msg) { match event { Quit => gtk::main_quit(), } } } impl Widget for Win { type Root = Window; fn root(&self) -> Self::Root { self.window.clone() } fn view(relm: &Relm<Self>, _model: ()) -> Win { let window = Window::new(WindowType::Toplevel); let hbox = gtk::Box::new(Horizontal, 0); let counter1 = hbox.add_widget::<Counter<i32>, _>(relm, 2); let counter2 = hbox.add_widget::<Counter<i32>, _>(relm, 3); window.add(&hbox); window.show_all(); connect!(relm, window, connect_delete_event(_, _), return (Some(Quit), Inhibit(false))); Win { _counter1: counter1, _counter2: counter2, window: 
window, } } } fn main() { Win::run(()).unwrap(); }
impl<T: Clone + IncDec + Display + 'static> Update for Counter<T> {
actor.rs
use std::io::SeekFrom; use std::path::{Path, PathBuf}; use log::info; use oxidized_json_checker::JsonChecker; use tokio::fs; use tokio::io::{AsyncSeekExt, AsyncWriteExt}; use tokio::sync::mpsc; use uuid::Uuid; use crate::index_controller::index_actor::IndexActorHandle; use crate::index_controller::{get_arc_ownership_blocking, UpdateMeta, UpdateStatus}; use super::{PayloadData, Result, UpdateError, UpdateMsg, UpdateStoreStore}; pub struct UpdateActor<D, S, I> { path: PathBuf, store: S, inbox: mpsc::Receiver<UpdateMsg<D>>, index_handle: I, } impl<D, S, I> UpdateActor<D, S, I> where D: AsRef<[u8]> + Sized + 'static, S: UpdateStoreStore, I: IndexActorHandle + Clone + Send + Sync + 'static, { pub fn new( store: S, inbox: mpsc::Receiver<UpdateMsg<D>>, path: impl AsRef<Path>, index_handle: I, ) -> anyhow::Result<Self> { let path = path.as_ref().to_owned(); std::fs::create_dir_all(path.join("update_files"))?; assert!(path.exists()); Ok(Self { store, inbox, path, index_handle, }) } pub async fn run(mut self) { use UpdateMsg::*; info!("Started update actor."); loop { match self.inbox.recv().await { Some(Update { uuid, meta, data, ret, }) => { let _ = ret.send(self.handle_update(uuid, meta, data).await); } Some(ListUpdates { uuid, ret }) => { let _ = ret.send(self.handle_list_updates(uuid).await); } Some(GetUpdate { uuid, ret, id }) => { let _ = ret.send(self.handle_get_update(uuid, id).await); } Some(Delete { uuid, ret }) => { let _ = ret.send(self.handle_delete(uuid).await); } Some(Create { uuid, ret }) => { let _ = ret.send(self.handle_create(uuid).await); } Some(Snapshot { uuid, path, ret }) => { let _ = ret.send(self.handle_snapshot(uuid, path).await); } Some(GetSize { uuid, ret }) => { let _ = ret.send(self.handle_get_size(uuid).await); } None => break, } } } async fn handle_update( &self, uuid: Uuid, meta: UpdateMeta, mut payload: mpsc::Receiver<PayloadData<D>>, ) -> Result<UpdateStatus> { let update_store = self.store.get_or_create(uuid).await?; let update_file_id = 
uuid::Uuid::new_v4(); let path = self .path .join(format!("update_files/update_{}", update_file_id)); let mut file = fs::OpenOptions::new() .read(true) .write(true) .create(true) .open(&path) .await .map_err(|e| UpdateError::Error(Box::new(e)))?; while let Some(bytes) = payload.recv().await { match bytes { Ok(bytes) => { file.write_all(bytes.as_ref()) .await .map_err(|e| UpdateError::Error(Box::new(e)))?; } Err(e) => { return Err(UpdateError::Error(e)); } } } file.flush() .await .map_err(|e| UpdateError::Error(Box::new(e)))?; file.seek(SeekFrom::Start(0)) .await .map_err(|e| UpdateError::Error(Box::new(e)))?; let mut file = file.into_std().await; tokio::task::spawn_blocking(move || { use std::io::{copy, sink, BufReader, Seek}; // If the payload is empty, ignore the check. if file .metadata() .map_err(|e| UpdateError::Error(Box::new(e)))? .len() > 0 { // Check that the json payload is valid: let reader = BufReader::new(&mut file); let mut checker = JsonChecker::new(reader); if copy(&mut checker, &mut sink()).is_err() || checker.finish().is_err() { // The json file is invalid, we use Serde to get a nice error message: file.seek(SeekFrom::Start(0)) .map_err(|e| UpdateError::Error(Box::new(e)))?; let _: serde_json::Value = serde_json::from_reader(file) .map_err(|e| UpdateError::Error(Box::new(e)))?; } } // The payload is valid, we can register it to the update store. update_store .register_update(meta, path, uuid) .map(UpdateStatus::Enqueued) .map_err(|e| UpdateError::Error(Box::new(e))) }) .await .map_err(|e| UpdateError::Error(Box::new(e)))? } async fn
(&self, uuid: Uuid) -> Result<Vec<UpdateStatus>> { let update_store = self.store.get(uuid).await?; tokio::task::spawn_blocking(move || { let result = update_store .ok_or(UpdateError::UnexistingIndex(uuid))? .list() .map_err(|e| UpdateError::Error(e.into()))?; Ok(result) }) .await .map_err(|e| UpdateError::Error(Box::new(e)))? } async fn handle_get_update(&self, uuid: Uuid, id: u64) -> Result<UpdateStatus> { let store = self .store .get(uuid) .await? .ok_or(UpdateError::UnexistingIndex(uuid))?; let result = store .meta(id) .map_err(|e| UpdateError::Error(Box::new(e)))? .ok_or(UpdateError::UnexistingUpdate(id))?; Ok(result) } async fn handle_delete(&self, uuid: Uuid) -> Result<()> { let store = self.store.delete(uuid).await?; if let Some(store) = store { tokio::task::spawn(async move { let store = get_arc_ownership_blocking(store).await; tokio::task::spawn_blocking(move || { store.prepare_for_closing().wait(); info!("Update store {} was closed.", uuid); }); }); } Ok(()) } async fn handle_create(&self, uuid: Uuid) -> Result<()> { let _ = self.store.get_or_create(uuid).await?; Ok(()) } async fn handle_snapshot(&self, uuid: Uuid, path: PathBuf) -> Result<()> { let index_handle = self.index_handle.clone(); if let Some(update_store) = self.store.get(uuid).await? { tokio::task::spawn_blocking(move || -> anyhow::Result<()> { // acquire write lock to prevent further writes during snapshot // the update lock must be acquired BEFORE the write lock to prevent dead lock let _lock = update_store.update_lock.lock(); let mut txn = update_store.env.write_txn()?; // create db snapshot update_store.snapshot(&mut txn, &path, uuid)?; futures::executor::block_on( async move { index_handle.snapshot(uuid, path).await }, )?; Ok(()) }) .await .map_err(|e| UpdateError::Error(e.into()))? .map_err(|e| UpdateError::Error(e.into()))?; } Ok(()) } async fn handle_get_size(&self, uuid: Uuid) -> Result<u64> { let size = match self.store.get(uuid).await? 
{ Some(update_store) => tokio::task::spawn_blocking(move || -> anyhow::Result<u64> { let txn = update_store.env.read_txn()?; update_store.get_size(&txn) }) .await .map_err(|e| UpdateError::Error(e.into()))? .map_err(|e| UpdateError::Error(e.into()))?, None => 0, }; Ok(size) } }
handle_list_updates
index.tsx
type Props = { uri: string | null; }; const Photo: React.FC<Props> = ({ uri }) => { if (uri) { return <ProductImage uri={uri} width={160} height={160} />; } return ( <S.Container> <S.Title>Nenhuma foto carregada</S.Title> </S.Container> ); }; export default Photo;
import React from 'react'; import ProductImage from '@/components/ProductImage'; import * as S from './styles';
Reduced.py
# Copyright 2004 by Iddo Friedberg. # All rights reserved. # # This file is part of the Biopython distribution and governed by your # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". # Please see the LICENSE file that should have been included as part of this # package. """Reduced alphabets which lump together several amino-acids into one letter. Reduced (redundant or simplified) alphabets are used to represent protein sequences using an alternative alphabet which lumps together several amino-acids into one letter, based on physico-chemical traits. For example, all the aliphatics (I,L,V) are usually quite interchangeable, so many sequence studies group them into one letter Examples of reduced alphabets are available in: http://viscose.herokuapp.com/html/alphabets.html The Murphy tables are from here: Murphy L.R., Wallqvist A, Levy RM. (2000) Simplified amino acid alphabets for protein fold recognition and implications for folding. Protein Eng. 13(3):149-152 These alphabets have been used with Bio.utils.reduce_sequence, which has been removed from Biopython. You can use this is alphabets and tables like this: >>> from Bio.Seq import Seq >>> from Bio import Alphabet >>> from Bio.Alphabet import Reduced >>> my_protein = Seq('MAGSKEWKRFCELTINEA', Alphabet.ProteinAlphabet()) Now, we convert this sequence into a sequence which only recognizes polar (P) or hydrophobic (H) residues: >>> new_protein = Seq('', Alphabet.Reduced.HPModel()) >>> for aa in my_protein: ... 
new_protein += Alphabet.Reduced.hp_model_tab[aa] >>> new_protein Seq('HPPPPPHPPHHPHPHPPP') The following Alphabet classes are available: - Murphy15: Maps 20 amino acids to 15, use murphy_15_tab for conversion, ambiguous letters: L: LVIM, F: FY, K: KR - Murphy10: Maps 20 amino acids to 10, use murphy_10_tab for conversion, ambiguous letters: L: LVIM, S: ST, F: FYW, E: EDNQ, K: KR - Murphy8: Maps 20 amino acids to 8, use murphy_8_tab for conversion, ambiguous letters: L: LVIMC, A: AG, S: ST, F: FYW, E: EDNQ, K: KR - Murphy4: Maps 20 amino acids to 4, use murphy_4_tab for conversion, ambiguous letters: L: LVIMC, A: AGSTP, F: FYW, E: EDNQKRH - HPModel: Groups amino acids as polar (hydrophilic) or hydrophobic (non-polar), use hp_model_tab for conversion, P: AGTSNQDEHRKP, H: CMFILVWY - PC5: Amino acids grouped according to 5 physico-chemical properties, use pc_5_table for conversion, A (Aliphatic): IVL, R (aRomatic): FYWH, C (Charged): KRDE, T (Tiny): GACS, D (Diverse): TMQNP """ from Bio import Alphabet murphy_15_tab = { "L": "L", "V": "L", "I": "L", "M": "L", "C": "C", "A": "A", "G": "G", "S": "S", "T": "T", "P": "P", "F": "F", "Y": "F", "W": "W", "E": "E", "D": "D", "N": "N", "Q": "Q", "K": "K", "R": "K", "H": "H", } class Murphy15(Alphabet.ProteinAlphabet):
murphy_15 = Murphy15() murphy_10_tab = { "L": "L", "V": "L", "I": "L", "M": "L", "C": "C", "A": "A", "G": "G", "S": "S", "T": "S", "P": "P", "F": "F", "Y": "F", "W": "F", "E": "E", "D": "E", "N": "E", "Q": "E", "K": "K", "R": "K", "H": "H", } class Murphy10(Alphabet.ProteinAlphabet): """Reduced protein alphabet with 10 letters. Letters: A, C, G, H, P, L(LVIM), S(ST), F(FYW), E(EDNQ), K(KR) """ letters = "LCAGSPFEKH" size = 1 murphy_10 = Murphy10() murphy_8_tab = { "L": "L", "V": "L", "I": "L", "M": "L", "C": "L", "A": "A", "G": "A", "S": "S", "T": "S", "P": "P", "F": "F", "Y": "F", "W": "F", "E": "E", "D": "E", "N": "E", "Q": "E", "K": "K", "R": "K", "H": "H", } class Murphy8(Alphabet.ProteinAlphabet): """Reduced protein alphabet with 8 letters. Letters: H, P, L(LVIMC), A(AG), S(ST), F(FYW), E(EDNQ), K(KR) """ letters = "LASPFEKH" size = 1 murphy_8 = Murphy8() murphy_4_tab = { "L": "L", "V": "L", "I": "L", "M": "L", "C": "L", "A": "A", "G": "A", "S": "A", "T": "A", "P": "A", "F": "F", "Y": "F", "W": "F", "E": "E", "D": "E", "N": "E", "Q": "E", "K": "E", "R": "E", "H": "E", } class Murphy4(Alphabet.ProteinAlphabet): """Reduced protein alphabet with 4 letters. Letters: L(LVIMC), A(AGSTP), F(FYW), E(EDNQKRH) """ letters = "LAFE" size = 1 murphy_4 = Murphy4() hp_model_tab = { "A": "P", # Hydrophilic "G": "P", "T": "P", "S": "P", "N": "P", "Q": "P", "D": "P", "E": "P", "H": "P", "R": "P", "K": "P", "P": "P", "C": "H", # Hydrophobic "M": "H", "F": "H", "I": "H", "L": "H", "V": "H", "W": "H", "Y": "H", } class HPModel(Alphabet.ProteinAlphabet): """Reduced protein alphabet with only two letters for polar or hydophobic. 
Letters: P (polar: AGTSNQDEHRKP), H (hydrophobic: CMFILVWY) """ letters = "HP" size = 1 hp_model = HPModel() pc_5_table = { "I": "A", # Aliphatic "V": "A", "L": "A", "F": "R", # Aromatic "Y": "R", "W": "R", "H": "R", "K": "C", # Charged "R": "C", "D": "C", "E": "C", "G": "T", # Tiny "A": "T", "C": "T", "S": "T", "T": "D", # Diverse "M": "D", "Q": "D", "N": "D", "P": "D", } class PC5(Alphabet.ProteinAlphabet): """Reduced protein alphabet with 5 letters for physico-chemical properties. Letters: A (Aliphatic: IVL), R (aRomatic: FYWH), C (Charged: KRDE), T (Tiny: GACS), D (Diverse: TMQNP) """ letters = "ARCTD" size = 1 pc5 = PC5()
"""Reduced protein alphabet with 15 letters. Letters: A, C, D, E, G, H, N, P, Q, S, T, W, L(LVIM), F(FY), K(KR) """ letters = "LCAGSTPFWEDNQKH" size = 1
connection_integration_test.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //go:build integration // +build integration package connection import ( "testing" "github.com/elastic/beats/v7/libbeat/tests/compose" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" ) func TestData(t *testing.T)
func getConfig(host string) map[string]interface{} { return map[string]interface{}{ "module": "zookeeper", "metricsets": []string{"connection"}, "hosts": []string{host}, } }
{ service := compose.EnsureUp(t, "zookeeper") f := mbtest.NewReportingMetricSetV2Error(t, getConfig(service.Host())) if err := mbtest.WriteEventsReporterV2Error(f, t, ""); err != nil { t.Fatal("write", err) } }
PostRepositoryImpl.go
package impl

import (
	"errors"
	"fmt"
	"go-blog-jwt-token/api/entities"
	"go-blog-jwt-token/api/payloads/request"
	"gorm.io/gorm"
	"math"
	"strings"
)

// PostRepositoryImpl is the GORM-backed implementation of the post
// repository; every query goes through the wrapped *gorm.DB handle.
type PostRepositoryImpl struct {
	db *gorm.DB // shared GORM connection, injected via the constructor
}

func
(db *gorm.DB) *PostRepositoryImpl { return &PostRepositoryImpl{db:db} } func (r *PostRepositoryImpl) GetAll(pagination *request.Pagination) (interface{}, error, int) { var posts []entities.Post totalRows := 0 totalPages := 0 fromRow := 0 toRow := 0 offset := pagination.Page * pagination.Limit // get data with limit, offset & order find := r.db.Debug().Where("published=?", 1).Limit(pagination.Limit).Offset(offset).Order(pagination.Sort) // generate where query searchs := pagination.Searchs if searchs != nil { for _, value := range searchs { column := value.Column action := value.Action query := value.Query switch action { case "equals": whereQuery := fmt.Sprintf("%s=?", column) find = find.Where(whereQuery, query) break case "contains": whereQuery := fmt.Sprintf("%s LIKE ?", column) find = find.Where(whereQuery, "%"+query+"%") break case "in": whereQuery := fmt.Sprintf("%s IN (?)", column) queryArray := strings.Split(query, ",") find = find.Where(whereQuery, queryArray) break } } } find = find.Find(&posts) if find.Error != nil { return nil, find.Error, totalPages } pagination.Rows = posts counting := int64(totalRows) // count all data err := r.db.Model(&entities.Post{}).Count(&counting).Error if err != nil { return nil, err, totalPages } totalRows = int(counting) pagination.TotalRows = totalRows // calculate total pages totalPages = int(math.Ceil(float64(totalRows) / float64(pagination.Limit))) - 1 if pagination.Page == 0 { fromRow = 1 toRow = pagination.Limit } else { if pagination.Page <= totalPages { // calculate from & to row fromRow = pagination.Page * pagination.Limit + 1 toRow = (pagination.Page + 1) * pagination.Limit } } if toRow > totalRows { toRow = totalRows } pagination.FromRow = fromRow pagination.ToRow = toRow return pagination, nil, totalPages } func (r *PostRepositoryImpl) CreatePost(postRequest request.PostRequest, urlImage string, categories []string, tags []string) (bool, error) { var article entities.Article var post entities.Post 
r.db.Where("id_article=?", postRequest.IdArticle).Take(&article) if article.ID == 0 { return false, errors.New("Article tidak ditemukan") } r.db.Where("nama_post=?", postRequest.NamaPost).Take(&post) if post.NamaPost != "" { return false, errors.New("Title sudah ada") } for _, val := range categories { var category entities.Category r.db.Where("id_category=?", val).Take(&category) if category.ID == 0 { return false, errors.New("Category tidak ditemukan") } } for _, val := range tags { var tag entities.Tag r.db.Where("id_tag=?", val).Take(&tag) if tag.ID == 0 { return false, errors.New("Tag tidak ditemukan") } } err := r.db.Exec("INSERT INTO tb_posts (nama_post, slug, image, description, published, id_article, create_by, create_at) VALUES (?, ?, ?, ?, ?, ?, ? ,?)", postRequest.NamaPost, postRequest.Slug, urlImage, postRequest.Description, postRequest.Published, postRequest.IdArticle, postRequest.CreateBy, postRequest.CreateAt, ).Error if err != nil { return false, err } var getPost entities.Post r.db.Where("nama_post=?", postRequest.NamaPost).Take(&getPost) if getPost.ID != 0 { for _, set := range categories { err := r.db.Exec("INSERT INTO tb_posts_has_categories (id_post, id_category) VALUES(?,?)", getPost.ID, set).Error if err != nil { return false, err } } for _, set := range tags { err := r.db.Exec("INSERT INTO tb_posts_has_tags (id_post, id_tag) VALUES(?,?)", getPost.ID, set).Error if err != nil { return false, err } } } return true, nil } func (r *PostRepositoryImpl) FindPost(IdArticle int64, IdPost int64) (entities.Post, error) { var post entities.Post r.db.Where("id_post=? AND id_article=?", IdPost, IdArticle).Take(&post) if post.ID <= 0 { return entities.Post{}, errors.New("Post tidak ditemukan") } return post, nil } func (r *PostRepositoryImpl) UpdatePost(postRequest request.PostRequest, categories []string, urlImage string, tags []string) (bool, error) { var article entities.Article var post entities.Post r.db.Where("id_post=? 
AND id_article=?", postRequest.ID, postRequest.IdArticle).Take(&post) if post.ID == 0 { return false, errors.New("Post tidak ditemukan") } r.db.Where("id_article=?", postRequest.IdArticle).Take(&article) if article.ID == 0 { return false, errors.New("Article tidak ditemukan") } postCategory := r.db.Model(&entities.PostCategory{}).Where("id_post=?", post.ID).Delete(&entities.PostCategory{}) if postCategory.Error != nil { return false, postCategory.Error } postTag := r.db.Model(&entities.PostTag{}).Where("id_post=?", post.ID).Delete(&entities.PostTag{}) if postTag.Error != nil { return false, postTag.Error } err := r.db.Exec("UPDATE tb_posts SET nama_post=?, slug=?, image=?, description=?, published=?, id_article=?, update_by=?, update_at=? WHERE id_post=?", postRequest.NamaPost, postRequest.Slug, urlImage, postRequest.Description, postRequest.Published, article.ID, postRequest.UpdateBy, postRequest.UpdateAt, post.ID, ).Error if err != nil { return false, err } var getPost entities.Post r.db.Where("nama_post=?", postRequest.NamaPost).Take(&getPost) if getPost.ID != 0 { for _, set := range categories { err := r.db.Exec("INSERT INTO tb_posts_has_categories (id_post, id_category) VALUES(?,?)", getPost.ID, set).Error if err != nil { return false, err } } for _, set := range tags { err := r.db.Exec("INSERT INTO tb_posts_has_tags (id_post, id_tag) VALUES(?,?)", getPost.ID, set).Error if err != nil { return false, err } } } return true, nil } func (r *PostRepositoryImpl) DeletePost(IdArticle int64, IdPost int64) (bool, error) { var post entities.Post r.db.Where("id_post=? AND id_article=?", IdPost, IdArticle).Take(&post) if post.ID <= 0 { return false, errors.New("Post tidak ditemukan") } row := r.db.Model(&entities.Post{}).Where("id_post=?", post.ID).Delete(post) if row.Error != nil { return false, row.Error } return true, nil } func (r *PostRepositoryImpl) PublishPost(IdArticle int64, IdPost int64) (bool, error) { var post entities.Post r.db.Where("id_post=? 
AND id_article=?", IdPost, IdArticle).Take(&post) if post.ID <= 0 { return false, errors.New("Post tidak ditemukan") } row := r.db.Model(&entities.Post{}).Select("published").Where("id_post=?", post.ID).Update("published", true) if row.Error != nil { return false, row.Error } return true, nil } func (r *PostRepositoryImpl) CancelPost(IdArticle int64, IdPost int64) (bool, error) { var post entities.Post r.db.Where("id_post=? AND id_article=?", IdPost, IdArticle).Take(&post) if post.ID <= 0 { return false, errors.New("Post tidak ditemukan") } row := r.db.Model(&entities.Post{}).Select("published").Where("id_post=?", post.ID).Update("published", false) if row.Error != nil { return false, row.Error } return true, nil }
NewPostRepositoryImpl
ad_group_audience_view_client.go
// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by protoc-gen-go_gapic. DO NOT EDIT. package googleads import ( "context" "fmt" "math" "net/url" "time" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" resourcespb "github.com/scotthenley/go-googleads/pb/v8/resources" servicespb "github.com/scotthenley/go-googleads/pb/v8/services" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" ) var newAdGroupAudienceViewClientHook clientHook // AdGroupAudienceViewCallOptions contains the retry settings for each method of AdGroupAudienceViewClient. 
type AdGroupAudienceViewCallOptions struct { GetAdGroupAudienceView []gax.CallOption } func defaultAdGroupAudienceViewGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("googleads.googleapis.com:443"), internaloption.WithDefaultMTLSEndpoint("googleads.mtls.googleapis.com:443"), internaloption.WithDefaultAudience("https://googleads.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } } func defaultAdGroupAudienceViewCallOptions() *AdGroupAudienceViewCallOptions { return &AdGroupAudienceViewCallOptions{ GetAdGroupAudienceView: []gax.CallOption{ gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Unavailable, codes.DeadlineExceeded, }, gax.Backoff{ Initial: 5000 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.30, }) }), }, } } // internalAdGroupAudienceViewClient is an interface that defines the methods availaible from Google Ads API. type internalAdGroupAudienceViewClient interface { Close() error setGoogleClientInfo(...string) Connection() *grpc.ClientConn GetAdGroupAudienceView(context.Context, *servicespb.GetAdGroupAudienceViewRequest, ...gax.CallOption) (*resourcespb.AdGroupAudienceView, error) } // AdGroupAudienceViewClient is a client for interacting with Google Ads API. // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. // // Service to manage ad group audience views. type AdGroupAudienceViewClient struct { // The internal transport-dependent client. internalClient internalAdGroupAudienceViewClient // The call options for this service. CallOptions *AdGroupAudienceViewCallOptions } // Wrapper methods routed to the internal client. // Close closes the connection to the API service. 
The user should invoke this when // the client is no longer required. func (c *AdGroupAudienceViewClient) Close() error { return c.internalClient.Close() } // setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *AdGroupAudienceViewClient) setGoogleClientInfo(keyval ...string) { c.internalClient.setGoogleClientInfo(keyval...) } // Connection returns a connection to the API service. // // Deprecated. func (c *AdGroupAudienceViewClient) Connection() *grpc.ClientConn { return c.internalClient.Connection() } // GetAdGroupAudienceView returns the requested ad group audience view in full detail. // // List of thrown errors: // AuthenticationError (at ) // AuthorizationError (at ) // HeaderError (at ) // InternalError (at ) // QuotaError (at ) // RequestError (at ) func (c *AdGroupAudienceViewClient) GetAdGroupAudienceView(ctx context.Context, req *servicespb.GetAdGroupAudienceViewRequest, opts ...gax.CallOption) (*resourcespb.AdGroupAudienceView, error) { return c.internalClient.GetAdGroupAudienceView(ctx, req, opts...) } // adGroupAudienceViewGRPCClient is a client for interacting with Google Ads API over gRPC transport. // // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. type adGroupAudienceViewGRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE disableDeadlines bool // Points back to the CallOptions field of the containing AdGroupAudienceViewClient CallOptions **AdGroupAudienceViewCallOptions // The gRPC API client. adGroupAudienceViewClient servicespb.AdGroupAudienceViewServiceClient // The x-goog-* metadata to be sent with each request. 
xGoogMetadata metadata.MD } // NewAdGroupAudienceViewClient creates a new ad group audience view service client based on gRPC. // The returned client must be Closed when it is done being used to clean up its underlying connections. // // Service to manage ad group audience views. func NewAdGroupAudienceViewClient(ctx context.Context, opts ...option.ClientOption) (*AdGroupAudienceViewClient, error) { clientOpts := defaultAdGroupAudienceViewGRPCClientOptions() if newAdGroupAudienceViewClientHook != nil
disableDeadlines, err := checkDisableDeadlines() if err != nil { return nil, err } connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) if err != nil { return nil, err } client := AdGroupAudienceViewClient{CallOptions: defaultAdGroupAudienceViewCallOptions()} c := &adGroupAudienceViewGRPCClient{ connPool: connPool, disableDeadlines: disableDeadlines, adGroupAudienceViewClient: servicespb.NewAdGroupAudienceViewServiceClient(connPool), CallOptions: &client.CallOptions, } c.setGoogleClientInfo() client.internalClient = c return &client, nil } // Connection returns a connection to the API service. // // Deprecated. func (c *adGroupAudienceViewGRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } // setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *adGroupAudienceViewGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. 
func (c *adGroupAudienceViewGRPCClient) Close() error {
	// Releases every gRPC connection held by the underlying pool.
	return c.connPool.Close()
}

// GetAdGroupAudienceView performs the unary RPC, applying the generated
// default deadline, routing metadata, and the configured retry policy.
func (c *adGroupAudienceViewGRPCClient) GetAdGroupAudienceView(ctx context.Context, req *servicespb.GetAdGroupAudienceViewRequest, opts ...gax.CallOption) (*resourcespb.AdGroupAudienceView, error) {
	// Apply the default deadline (3,600,000 ms = 1 h) only when the caller
	// has not set one and default deadlines were not globally disabled.
	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
		cctx, cancel := context.WithTimeout(ctx, 3600000*time.Millisecond)
		defer cancel()
		ctx = cctx
	}
	// x-goog-request-params routing header derived from the resource name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource_name", url.QueryEscape(req.GetResourceName())))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// The full-slice expression pins capacity so append copies instead of
	// mutating the shared CallOptions slice.
	opts = append((*c.CallOptions).GetAdGroupAudienceView[0:len((*c.CallOptions).GetAdGroupAudienceView):len((*c.CallOptions).GetAdGroupAudienceView)], opts...)
	var resp *resourcespb.AdGroupAudienceView
	// gax.Invoke runs the call under the retry/backoff settings resolved
	// from opts.
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.adGroupAudienceViewClient.GetAdGroupAudienceView(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
{ hookOpts, err := newAdGroupAudienceViewClientHook(ctx, clientHookParams{}) if err != nil { return nil, err } clientOpts = append(clientOpts, hookOpts...) }
converter.py
# -*- coding: utf-8 -*-
"""
Convert README file from Markdown to text
"""
from markdown import markdown
import html2text  # from `aaronsw` :(

# Sentinel spliced into the text wherever a fenced code block is removed,
# so fix_text() can put the block back after the Markdown -> HTML -> text
# round trip (which would otherwise mangle it).
MARKER = 'MARKER()()MARKER'


def fix_quotes(mdcontent):
    """Strip ``` fenced code blocks out of *mdcontent*.

    Returns ``(new_string, quotes)``: the input with each fenced block
    replaced by MARKER, and a list of the removed blocks (each a list of
    lines, with everything before the first newline — i.e. the opening
    fence/language tag — dropped).
    """
    count = 0              # run length of consecutive backticks seen
    quote_state = False    # True while inside a fenced block
    quote_char = '`'
    quote_string = ''      # accumulates the body of the current fence
    new_string = ''
    quotes = []
    for char in mdcontent:
        is_quote = char == quote_char
        if quote_state and not is_quote:
            quote_string += char
        if is_quote:
            count += 1
        else:
            count = 0
        if not quote_state:
            # NOTE(review): the three opening backticks themselves are
            # still copied into new_string here; only the fence body and
            # the closing backticks are withheld.
            new_string += char
        if count == 3:
            if quote_state:
                # Closing fence: emit the sentinel and save the body.
                new_string += MARKER
                quotes.append(quote_string.split('\n')[1:])
                # for line in quote_string.split('\n')[1:]:
                #     if line.strip() != '':
                #         print("LINE *%s*" % line)
                #         new_string += '\t' + line + '\n'
                quote_state = False
            else:
                quote_state = True
            quote_string = ''
    return new_string, quotes


def fix_text(marked_text, quotes):
    """Re-insert the saved code blocks at each MARKER in *marked_text*.

    Non-blank lines of each block are re-emitted indented with a tab.
    Markers beyond the number of saved blocks are silently dropped.
    """
    new_string = ''
    quotes = quotes[::-1]  # reversed so .pop() yields blocks in order
    for piece in marked_text.split(MARKER):
        new_string += piece
        try:
            quote = quotes.pop()
        except IndexError:
            pass
        else:
            for line in quote:
                if line.strip() != '':
                    # print("LINE *%s*" % line)
                    new_string += '\t' + line + '\n'
    return new_string


def convert_markdown_file(input_file):
    """Convert *input_file* (Markdown) to plain text, preserving code fences.

    The output file name is everything before the first '.' in
    *input_file* (e.g. README.md -> README) — note this truncates names
    containing extra dots.
    """
    # input_file = 'README.md'
    output_file = input_file.split('.')[0]
    with open(input_file, 'r') as rhandler:
        # markdown
        mdcontent = rhandler.read()
        marked_content, quotes = fix_quotes(mdcontent)
        # HTML
        html = markdown(marked_content)
        # text
        marked_text = html2text.html2text(html)
        text = fix_text(marked_text, quotes)
    with open(output_file, 'w') as whandler:
        whandler.write(text)
0003_job_run_time.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.8 on 2016-08-19 05:37 from __future__ import unicode_literals import datetime from django.db import migrations, models from django.utils.timezone import utc class
(migrations.Migration): dependencies = [ ('jobs', '0002_remove_job_run_time'), ] operations = [ migrations.AddField( model_name='job', name='run_time', field=models.DateTimeField(default=datetime.datetime(2016, 8, 19, 5, 37, 14, 816610, tzinfo=utc), verbose_name='Time of the Last Run'), preserve_default=False, ), ]
Migration
mod.rs
use arbitrary::Arbitrary; use arbitrary::Unstructured; use holo_hash::*; use holochain_sqlite::prelude::*; use holochain_sqlite::rusqlite::Transaction; use holochain_state::validation_db::ValidationLimboStatus; use holochain_state::{mutations, prelude::test_in_mem_db}; use holochain_types::db_cache::*; use holochain_types::dht_op::{DhtOpLight, DhtOpType, OpOrder}; use holochain_zome_types::Create; use holochain_zome_types::ValidationStatus; use holochain_zome_types::{ Dna, Header, HeaderHashed, Signature, SignedHeaderHashed, Timestamp, NOISE, }; use std::collections::HashMap; use std::sync::Arc; fn insert_header_and_op(txn: &mut Transaction, u: &mut Unstructured, header: &Header) -> DhtOpHash { let timestamp = Timestamp::arbitrary(u).unwrap(); let op_order = OpOrder::new(DhtOpType::RegisterAgentActivity, timestamp); let any_hash: AnyDhtHash = EntryHash::arbitrary(u).unwrap().into(); let header = SignedHeaderHashed::with_presigned( HeaderHashed::from_content_sync(header.clone()), Signature::arbitrary(u).unwrap(), ); let hash = header.as_hash().clone(); let op_hash = DhtOpHash::arbitrary(u).unwrap(); mutations::insert_header(txn, &header).unwrap(); mutations::insert_op_lite( txn, &DhtOpLight::RegisterAgentActivity(hash, any_hash.clone()), &op_hash, &op_order, &timestamp, ) .unwrap(); op_hash } fn set_integrated(db: &DbWrite<DbKindDht>, u: &mut Unstructured, op_hash: &DhtOpHash)
fn set_ready_to_integrate(db: &DbWrite<DbKindDht>, op_hash: &DhtOpHash) { db.test_commit(|txn| { mutations::set_validation_stage(txn, op_hash, ValidationLimboStatus::AwaitingIntegration) .unwrap(); mutations::set_validation_status(txn, op_hash, ValidationStatus::Valid).unwrap(); }); } async fn check_state( cache: &DhtDbQueryCache, f: impl FnOnce(&HashMap<Arc<AgentPubKey>, ActivityState>), ) { cache.get_state().await.share_ref(|activity| f(activity)); } #[tokio::test(flavor = "multi_thread")] async fn cache_inits_correctly() { let mut u = Unstructured::new(&NOISE); let db = test_in_mem_db(DbKindDht(Arc::new(DnaHash::from_raw_32(vec![0; 32])))); let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| assert!(activity.is_empty())).await; let header = Header::Dna(Dna::arbitrary(&mut u).unwrap()); let author = header.author().clone(); let hash = HeaderHash::with_data_sync(&header); let op_hash = db.test_commit(|txn| insert_header_and_op(txn, &mut u, &header)); let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| assert!(activity.is_empty())).await; set_ready_to_integrate(&db, &op_hash); let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| { let b = activity.get(header.author()).unwrap(); assert_eq!(b.integrated, None); assert_eq!(b.ready_to_integrate, Some(0)); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 1); assert_eq!(*to_integrate[0].0, *header.author()); assert_eq!(to_integrate[0].1, 0..=0); set_integrated(&db, &mut u, &op_hash); let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| { let b = activity.get(header.author()).unwrap(); assert_eq!(b.integrated, Some(0)); assert_eq!(b.ready_to_integrate, None); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 0); let mut header = Create::arbitrary(&mut u).unwrap(); 
header.prev_header = hash.clone(); header.header_seq = 1; header.author = author.clone(); let header: Header = header.into(); let op_hash = db.test_commit(|txn| insert_header_and_op(txn, &mut u, &header)); let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| { let b = activity.get(header.author()).unwrap(); assert_eq!(b.integrated, Some(0)); assert_eq!(b.ready_to_integrate, None); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 0); set_ready_to_integrate(&db, &op_hash); let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| { let b = activity.get(header.author()).unwrap(); assert_eq!(b.integrated, Some(0)); assert_eq!(b.ready_to_integrate, Some(1)); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 1); assert_eq!(*to_integrate[0].0, *header.author()); assert_eq!(to_integrate[0].1, 1..=1); set_integrated(&db, &mut u, &op_hash); let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| { let b = activity.get(header.author()).unwrap(); assert_eq!(b.integrated, Some(1)); assert_eq!(b.ready_to_integrate, None); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 0); } #[tokio::test(flavor = "multi_thread")] async fn cache_init_catches_gaps() { let mut u = Unstructured::new(&NOISE); let db = test_in_mem_db(DbKindDht(Arc::new(DnaHash::from_raw_32(vec![0; 32])))); let header = Header::Dna(Dna::arbitrary(&mut u).unwrap()); let hash = HeaderHash::with_data_sync(&header); let author = header.author().clone(); // Create the missing header so we can get the hash. 
let mut missing_header = Create::arbitrary(&mut u).unwrap(); missing_header.prev_header = hash; missing_header.header_seq = 1; missing_header.author = author.clone(); let missing_header: Header = missing_header.into(); let missing_hash = HeaderHash::with_data_sync(&missing_header); let mut op_hashes = db.test_commit(|txn| { let mut op_hashes = Vec::new(); op_hashes.push(insert_header_and_op(txn, &mut u, &header)); let mut header = Create::arbitrary(&mut u).unwrap(); header.prev_header = missing_hash; header.header_seq = 2; header.author = author.clone(); let header: Header = header.into(); op_hashes.push(insert_header_and_op(txn, &mut u, &header)); op_hashes }); set_ready_to_integrate(&db, &op_hashes[0]); let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| { let b = activity.get(header.author()).unwrap(); assert_eq!(b.integrated, None); assert_eq!(b.ready_to_integrate, Some(0)); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 1); assert_eq!(*to_integrate[0].0, *header.author()); assert_eq!(to_integrate[0].1, 0..=0); set_integrated(&db, &mut u, &op_hashes[0]); set_ready_to_integrate(&db, &op_hashes[1]); let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| { let b = activity.get(header.author()).unwrap(); assert_eq!(b.integrated, Some(0)); assert_eq!(b.ready_to_integrate, None); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 0); op_hashes.push(db.test_commit(|txn| insert_header_and_op(txn, &mut u, &missing_header))); let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| { let b = activity.get(header.author()).unwrap(); assert_eq!(b.integrated, Some(0)); assert_eq!(b.ready_to_integrate, None); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 0); set_ready_to_integrate(&db, &op_hashes[2]); 
let cache = DhtDbQueryCache::new(db.clone().into()); check_state(&cache, |activity| { let b = activity.get(header.author()).unwrap(); assert_eq!(b.integrated, Some(0)); assert_eq!(b.ready_to_integrate, Some(2)); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 1); assert_eq!(*to_integrate[0].0, *header.author()); assert_eq!(to_integrate[0].1, 1..=2); } #[tokio::test(flavor = "multi_thread")] async fn cache_set_integrated() { let mut u = Unstructured::new(&NOISE); let db = test_in_mem_db(DbKindDht(Arc::new(DnaHash::from_raw_32(vec![0; 32])))); let header = Header::Dna(Dna::arbitrary(&mut u).unwrap()); let author = header.author().clone(); db.test_commit(|txn| insert_header_and_op(txn, &mut u, &header)); let cache = DhtDbQueryCache::new(db.clone().into()); cache .set_activity_ready_to_integrate(&author, 0) .await .unwrap(); check_state(&cache, |activity| { let b = activity.get(&author).unwrap(); assert_eq!(b.integrated, None); assert_eq!(b.ready_to_integrate, Some(0)); }) .await; check_state(&cache, |activity| { dbg!(activity); }) .await; cache.set_activity_to_integrated(&author, 0).await.unwrap(); check_state(&cache, |activity| { dbg!(activity); let b = activity.get(&author).unwrap(); assert_eq!(b.integrated, Some(0)); assert_eq!(b.ready_to_integrate, None); }) .await; cache .set_activity_ready_to_integrate(&author, 1) .await .unwrap(); cache .set_activity_ready_to_integrate(&author, 2) .await .unwrap(); check_state(&cache, |activity| { let b = activity.get(&author).unwrap(); assert_eq!(b.integrated, Some(0)); assert_eq!(b.ready_to_integrate, Some(2)); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 1); assert_eq!(*to_integrate[0].0, author); assert_eq!(to_integrate[0].1, 1..=2); cache.set_activity_to_integrated(&author, 1).await.unwrap(); check_state(&cache, |activity| { let b = activity.get(&author).unwrap(); assert_eq!(b.integrated, 
Some(1)); assert_eq!(b.ready_to_integrate, Some(2)); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert_eq!(to_integrate.len(), 1); assert_eq!(*to_integrate[0].0, author); assert_eq!(to_integrate[0].1, 2..=2); cache.set_activity_to_integrated(&author, 2).await.unwrap(); check_state(&cache, |activity| { let b = activity.get(&author).unwrap(); assert_eq!(b.integrated, Some(2)); assert_eq!(b.ready_to_integrate, None); }) .await; } #[tokio::test(flavor = "multi_thread")] async fn cache_set_all_integrated() { let mut u = Unstructured::new(&NOISE); let test_activity: Vec<_> = std::iter::repeat_with(|| { ( Arc::new(AgentPubKey::arbitrary(&mut u).unwrap()), 0..=(u.int_in_range(0..=u32::MAX).unwrap()), ) }) .take(1000) .collect(); let db = test_in_mem_db(DbKindDht(Arc::new(DnaHash::from_raw_32(vec![0; 32])))); let cache = DhtDbQueryCache::new(db.clone().into()); cache .set_all_activity_to_integrated(test_activity.clone()) .await .unwrap(); check_state(&cache, |activity| { for (author, seq_range) in &test_activity { let b = activity.get(author.as_ref()).unwrap(); assert_eq!(b.integrated, Some(*seq_range.end())); assert_eq!(b.ready_to_integrate, None); } }) .await; let test_activity: HashMap<_, _> = test_activity.into_iter().collect(); let to_integrate = cache.get_activity_to_integrate().await.unwrap(); for (author, seq_range) in to_integrate { let range = test_activity.get(&author).unwrap(); assert_eq!(*range, seq_range); } } #[tokio::test(flavor = "multi_thread")] async fn check_none_integrated_with_awaiting_deps() { let mut u = Unstructured::new(&NOISE); let author = Arc::new(AgentPubKey::arbitrary(&mut u).unwrap()); let db = test_in_mem_db(DbKindDht(Arc::new(DnaHash::from_raw_32(vec![0; 32])))); let cache = DhtDbQueryCache::new(db.clone().into()); cache .set_activity_ready_to_integrate(author.as_ref(), 3) .await .unwrap(); check_state(&cache, |activity| { let b = activity.get(author.as_ref()).unwrap(); assert_eq!(b.integrated, None); 
assert_eq!(b.ready_to_integrate, None); assert_eq!(b.awaiting_deps, vec![3]); }) .await; let to_integrate = cache.get_activity_to_integrate().await.unwrap(); assert!(to_integrate.is_empty()); }
{ db.test_commit(|txn| { mutations::set_validation_stage(txn, op_hash, ValidationLimboStatus::Pending).unwrap(); mutations::set_when_integrated(txn, op_hash, Timestamp::arbitrary(u).unwrap()).unwrap(); }); }
network_policy.go
// Copyright 2019 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openflow import ( "fmt" "net" "strconv" "k8s.io/klog" "github.com/vmware-tanzu/antrea/pkg/agent/types" "github.com/vmware-tanzu/antrea/pkg/apis/networking/v1beta1" secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1" binding "github.com/vmware-tanzu/antrea/pkg/ovs/openflow" ) const ( MatchDstIP int = iota MatchSrcIP MatchDstIPNet MatchSrcIPNet MatchDstOFPort MatchSrcOFPort MatchTCPDstPort MatchUDPDstPort MatchSCTPDstPort Unsupported ) // IP address calculated from Pod's address. type IPAddress net.IP func (a *IPAddress) GetMatchKey(addrType types.AddressType) int { switch addrType { case types.SrcAddress: return MatchSrcIP case types.DstAddress: return MatchDstIP default: klog.Errorf("Unknown AddressType %d in IPAddress", addrType) return Unsupported } } func (a *IPAddress) GetMatchValue() string { addr := net.IP(*a) return addr.String() } func (a *IPAddress) GetValue() interface{} { return net.IP(*a) } func NewIPAddress(addr net.IP) *IPAddress { ia := IPAddress(addr) return &ia } // IP block calculated from Pod's address. 
type IPNetAddress net.IPNet func (a *IPNetAddress) GetMatchKey(addrType types.AddressType) int { switch addrType { case types.SrcAddress: return MatchSrcIPNet case types.DstAddress: return MatchDstIPNet default: klog.Errorf("Unknown AddressType %d in IPNetAddress", addrType) return Unsupported } } func (a *IPNetAddress) GetMatchValue() string { addr := net.IPNet(*a) return addr.String() } func (a *IPNetAddress) GetValue() interface{} { return net.IPNet(*a) } func NewIPNetAddress(addr net.IPNet) *IPNetAddress { ia := IPNetAddress(addr) return &ia } // OFPortAddress is the Openflow port of an interface. type OFPortAddress int32 func (a *OFPortAddress) GetMatchKey(addrType types.AddressType) int { switch addrType { case types.SrcAddress: // in_port is used in egress rule to match packets sent from local Pod. Service traffic is not covered by this // match, and source IP will be matched instead. return MatchSrcOFPort case types.DstAddress: return MatchDstOFPort default: klog.Errorf("Unknown AddressType %d in OFPortAddress", addrType) return Unsupported } } func (a *OFPortAddress) GetMatchValue() string { return fmt.Sprintf("%d", int32(*a)) } func (a *OFPortAddress) GetValue() interface{} { return int32(*a) } func NewOFPortAddress(addr int32) *OFPortAddress { a := OFPortAddress(addr) return &a } // ConjunctionNotFound is an error response when the specified policyRuleConjunction is not found from the local cache. type ConjunctionNotFound uint32 func (e *ConjunctionNotFound) Error() string { return fmt.Sprintf("policyRuleConjunction with ID %d not found", uint32(*e)) } func newConjunctionNotFound(conjunctionID uint32) *ConjunctionNotFound { err := ConjunctionNotFound(conjunctionID) return &err } // conjunctiveMatch generates match conditions for conjunctive match flow entry, including source or destination // IP address, ofport number of OVS interface, or Service port. When conjunctiveMatch is used to match IP // address or ofport number, matchProtocol is "ip". 
When conjunctiveMatch is used to match Service // port, matchProtocol is Service protocol. If Service protocol is not set, "tcp" is used by default. type conjunctiveMatch struct { tableID binding.TableIDType priority *uint16 matchKey int matchValue interface{} } func (m *conjunctiveMatch) generateGlobalMapKey() string { var valueStr, priorityStr string matchType := m.matchKey switch v := m.matchValue.(type) { case net.IP: // Use the unique format "x.x.x.x/xx" for IP address and IP net, to avoid generating two different global map // keys for IP and IP/32. Use MatchDstIPNet/MatchSrcIPNet as match type to generate global cache key for both IP // and IPNet. This is because OVS treats IP and IP/32 as the same condition, if Antrea has two different // conjunctive match flow contexts, only one flow entry is installed on OVS, and the conjunctive actions in the // first context wil be overwritten by those in the second one. valueStr = fmt.Sprintf("%s/32", v.String()) switch m.matchKey { case MatchDstIP: matchType = MatchDstIPNet case MatchSrcIP: matchType = MatchSrcIPNet } case net.IPNet: valueStr = v.String() default: // The default cases include the matchValue is a Service port or an ofport Number. valueStr = fmt.Sprintf("%s", m.matchValue) } if m.priority == nil { priorityStr = strconv.Itoa(int(priorityNormal)) } else { priorityStr = strconv.Itoa(int(*m.priority)) } return fmt.Sprintf("table:%d,priority:%s,type:%d,value:%s", m.tableID, priorityStr, matchType, valueStr) } // changeType is generally used to describe the change type of a conjMatchFlowContext. It is also used in "flowChange" // to describe the expected OpenFlow operation which needs to be applied on the OVS bridge, and used in "actionChange" // to describe the policyRuleConjunction is expected to be added to or removed from conjMatchFlowContext's actions. // The value of changeType could be creation, modification, and deletion. 
type changeType int

const (
	insertion changeType = iota
	modification
	deletion
)

// flowChange stores the expected OpenFlow entry and flow operation type which need to be applied on the OVS bridge.
// The "flow" in flowChange should be nil if there is no change on the OpenFlow entry. A possible case is that a
// DENY-ALL rule is required by a policyRuleConjunction, the flowChange will update the in-memory cache, but will not
// change on OVS.
type flowChange struct {
	flow       binding.Flow
	changeType changeType
}

// actionChange stores the changed action of the conjunctive match flow, and the change type.
// The "action" in actionChange is not nil.
type actionChange struct {
	action     *conjunctiveAction
	changeType changeType
}

// conjunctiveAction generates the policyRuleConjunction action in Openflow entry. The flow action is like
// policyRuleConjunction(conjID,clauseID/nClause) when it has been realized on the switch.
type conjunctiveAction struct {
	conjID   uint32
	clauseID uint8
	nClause  uint8
}

// conjMatchFlowContext generates conjunctive match flow entries for conjunctions share the same match conditions.
// One conjMatchFlowContext is responsible for one specific conjunctive match flow entry. As the match condition
// of the flow entry can be shared by different conjunctions, the realized Openflow entry might have multiple
// conjunctive actions. If the dropTable is not nil, conjMatchFlowContext also installs a drop flow in the dropTable.
type conjMatchFlowContext struct {
	// conjunctiveMatch describes the match condition of conjunctive match flow entry.
	*conjunctiveMatch
	// actions is a map from policyRuleConjunction ID to conjunctiveAction. It records all the conjunctive actions in
	// the conjunctive match flow. When the number of actions is reduced to 0, the conjMatchFlowContext.flow is
	// uninstalled from the switch.
	actions map[uint32]*conjunctiveAction
	// denyAllRules is a set to cache the "DENY-ALL" rules that is applied to the matching address in this context.
	denyAllRules map[uint32]bool
	client       *client
	// flow is the conjunctive match flow built from this context. flow needs to be updated if actions are changed.
	flow binding.Flow
	// dropFlow is the default drop flow built from this context to drop packets in the AppliedToGroup but not pass the
	// NetworkPolicy rule. dropFlow is installed on the switch as long as either actions or denyAllRules is not
	// empty, and uninstalled when both two are empty. When the dropFlow is uninstalled from the switch, the
	// conjMatchFlowContext is removed from the cache.
	dropFlow binding.Flow
}

// createOrUpdateConjunctiveMatchFlow creates or updates the conjunctive match flow with the latest actions. It returns
// the flowChange including the changed OpenFlow entry and the expected operation which need to be applied on the OVS bridge.
func (ctx *conjMatchFlowContext) createOrUpdateConjunctiveMatchFlow(actions []*conjunctiveAction, priority *uint16) *flowChange {
	// Check if flow is already installed. If not, create a new flow.
	if ctx.flow == nil {
		// Check the number of valid conjunctiveActions, and return nil immediately if it is 0. It happens when the match
		// condition is used only for matching AppliedToGroup, but no From or To is defined in the NetworkPolicy rule.
		if len(actions) == 0 {
			return nil
		}

		// Create the conjunctive match flow entry. The actions here should not be empty for either add or update case.
		// The expected operation for a new Openflow entry should be "insertion".
		flow := ctx.client.conjunctiveMatchFlow(ctx.tableID, ctx.matchKey, ctx.matchValue, priority, actions...)
		return &flowChange{
			flow:       flow,
			changeType: insertion,
		}
	}

	// Modify the existing Openflow entry and reset the actions.
	flowBuilder := ctx.flow.CopyToBuilder(0)
	for _, act := range actions {
		flowBuilder.Action().Conjunction(act.conjID, act.clauseID, act.nClause)
	}
	// The expected operation for an existing Openflow entry should be "modification".
	return &flowChange{
		flow:       flowBuilder.Done(),
		changeType: modification,
	}
}

// deleteAction deletes the specified policyRuleConjunction from conjunctiveMatchFlow's actions, and then returns the
// flowChange.
func (ctx *conjMatchFlowContext) deleteAction(conjID uint32, priority *uint16) *flowChange {
	// If the specified conjunctive action is the last one, delete the conjunctive match flow entry from the OVS bridge.
	// No need to check if the conjunction ID of the only conjunctive action is the specified ID or not, as it
	// has been checked in the caller.
	if len(ctx.actions) == 1 && ctx.flow != nil {
		return &flowChange{
			flow:       ctx.flow,
			changeType: deletion,
		}
	} else {
		// Modify the Openflow entry and reset the other conjunctive actions.
		var actions []*conjunctiveAction
		for _, act := range ctx.actions {
			if act.conjID != conjID {
				actions = append(actions, act)
			}
		}
		return ctx.createOrUpdateConjunctiveMatchFlow(actions, priority)
	}
}

// addAction adds the specified policyRuleConjunction into conjunctiveMatchFlow's actions, and then returns the flowChange.
func (ctx *conjMatchFlowContext) addAction(action *conjunctiveAction, priority *uint16) *flowChange {
	// Check if the conjunction exists in conjMatchFlowContext actions or not. If yes, return nil immediately.
	_, found := ctx.actions[action.conjID]
	if found {
		return nil
	}

	// Append current conjunctive action to the existing actions, and then calculate the conjunctive match flow changes.
	actions := []*conjunctiveAction{action}
	for _, act := range ctx.actions {
		actions = append(actions, act)
	}
	return ctx.createOrUpdateConjunctiveMatchFlow(actions, priority)
}

// addDenyAllRule records a DENY-ALL rule ID in the context, lazily allocating
// the denyAllRules set on first use.
func (ctx *conjMatchFlowContext) addDenyAllRule(ruleID uint32) {
	if ctx.denyAllRules == nil {
		ctx.denyAllRules = make(map[uint32]bool)
	}
	ctx.denyAllRules[ruleID] = true
}

// delDenyAllRule removes a DENY-ALL rule ID from the context, if present.
func (ctx *conjMatchFlowContext) delDenyAllRule(ruleID uint32) {
	// Delete the DENY-ALL rule if it is in context denyAllRules.
	_, found := ctx.denyAllRules[ruleID]
	if found {
		delete(ctx.denyAllRules, ruleID)
	}
}

// conjMatchFlowContextChange describes the changes of a conjMatchFlowContext. It is generated when a policyRuleConjunction
// is added, deleted, or the addresses in an existing policyRuleConjunction are changed. The changes are calculated first,
// and then applied on the OVS bridge using a single Bundle, and lastly the local cache is updated. The local cache
// is updated only if conjMatchFlowContextChange is applied on the OVS bridge successfully.
type conjMatchFlowContextChange struct {
	// context is the changed conjMatchFlowContext, which needs to be updated after the OpenFlow entries are applied to
	// the OVS bridge. context is not nil.
	context *conjMatchFlowContext
	// ctxChangeType is the changed type of the conjMatchFlowContext. The possible values are "creation", "modification"
	// and "deletion". Add the context into the globalConjMatchFlowCache if the ctxChangeType is "insertion", and remove
	// from the globalConjMatchFlowCache if it is "deletion".
	ctxChangeType changeType
	// matchFlow is the changed conjunctive match flow which needs to be realized on the OVS bridge. It is used to update
	// conjMatchFlowContext.flow. matchFlow is set if the conjunctive match flow needs to be updated on the OVS bridge, or
	// a DENY-ALL rule change is required by the policyRuleConjunction. matchFlow is nil if the policyRuleConjunction
	// is already added/removed in the conjMatchFlowContext's actions or denyAllRules.
	matchFlow *flowChange
	// dropFlow is the changed drop flow which needs to be realized on the OVS bridge. It is used to update
	// conjMatchFlowContext.dropFlow. dropFlow is set when the default drop flow needs to be added or removed on the OVS
	// bridge, and it is nil in other cases.
	dropFlow *flowChange
	// clause is the policyRuleConjunction's clause having current conjMatchFlowContextChange. It is used to update the
	// mapping relations between the policyRuleConjunction and the conjMatchFlowContext. Update the clause.matches after
	// the conjMatchFlowContextChange is realized on the OVS bridge. clause is not nil.
	clause *clause
	// actChange is the changed conjunctive action. It is used to update the conjMatchFlowContext's actions. actChange
	// is not nil.
	actChange *actionChange
}

// updateContextStatus changes conjMatchFlowContext's status, including,
// 1) reset flow and dropFlow after the flow changes have been applied to the OVS bridge,
// 2) modify the actions with the changed action,
// 3) update the mapping of denyAllRules and corresponding policyRuleConjunction,
// 4) add the new conjMatchFlowContext into the globalConjMatchFlowCache, or remove the deleted conjMatchFlowContext
// from the globalConjMatchFlowCache.
func (c *conjMatchFlowContextChange) updateContextStatus() {
	matcherKey := c.context.generateGlobalMapKey()
	// Update clause.matches with the conjMatchFlowContext, and update conjMatchFlowContext.actions with the changed
	// conjunctive action.
	changedAction := c.actChange.action
	switch c.actChange.changeType {
	case insertion:
		c.clause.matches[matcherKey] = c.context
		if changedAction != nil {
			c.context.actions[changedAction.conjID] = changedAction
		}
	case deletion:
		delete(c.clause.matches, matcherKey)
		if changedAction != nil {
			delete(c.context.actions, changedAction.conjID)
		}
	}
	// Update the match flow in the conjMatchFlowContext. There are two kinds of possible changes on the match flow:
	// 1) A conjunctive match flow change required by the policyRuleConjunction.
	// 2) A DENY-ALL rule required by the policyRuleConjunction.
	// For 1), conjMatchFlowContext.Flow should be updated with the conjMatchFlowContextChange.matchFlow.flow.
	// For 2), append or delete the conjunction ID from the conjMatchFlowContext's denyAllRules.
	if c.matchFlow != nil {
		switch c.matchFlow.changeType {
		case insertion:
			fallthrough
		case modification:
			if c.matchFlow.flow != nil {
				c.context.flow = c.matchFlow.flow
			} else {
				// A nil flow with insertion/deletion action change encodes a DENY-ALL rule update.
				switch c.actChange.changeType {
				case insertion:
					c.context.addDenyAllRule(c.clause.action.conjID)
				case deletion:
					c.context.delDenyAllRule(c.clause.action.conjID)
				}
			}
		case deletion:
			if c.matchFlow.flow != nil {
				c.context.flow = nil
			} else {
				c.context.delDenyAllRule(c.clause.action.conjID)
			}
		}
	}
	// Update conjMatchFlowContext.dropFlow.
	if c.dropFlow != nil {
		switch c.dropFlow.changeType {
		case insertion:
			c.context.dropFlow = c.dropFlow.flow
		case deletion:
			c.context.dropFlow = nil
		}
	}

	// Update globalConjMatchFlowCache. Add the conjMatchFlowContext into the globalConjMatchFlowCache if the ctxChangeType
	// is "insertion", or delete from the globalConjMatchFlowCache if the ctxChangeType is "deletion".
	switch c.ctxChangeType {
	case insertion:
		c.context.client.globalConjMatchFlowCache[matcherKey] = c.context
	case deletion:
		delete(c.context.client.globalConjMatchFlowCache, matcherKey)
	}
}

// policyRuleConjunction is responsible to build Openflow entries for Pods that are in a NetworkPolicy rule's AppliedToGroup.
// The Openflow entries include conjunction action flows, conjunctive match flows, and default drop flows in the dropTable.
// NetworkPolicyController will make sure only one goroutine operates on a policyRuleConjunction.
// 1) Conjunction action flows use policyRuleConjunction ID as match condition. policyRuleConjunction ID is the single
// match condition for conjunction action flows to allow packets. If the NetworkPolicy rule has also configured excepts
// in From or To, Openflow entries are installed only for diff IPBlocks between From/To and Excepts. These are added as
// conjunctive match flows as described below.
// 2) Conjunctive match flows adds conjunctive actions in Openflow entry, and they are grouped by clauses.
The match // condition in one clause is one of these three types: from address(for fromClause), or to address(for toClause), or // service ports(for serviceClause) configured in the NetworkPolicy rule. Each conjunctive match flow entry is // maintained by one specific conjMatchFlowContext which is stored in globalConjMatchFlowCache, and shared by clauses // if they have the same match conditions. clause adds or deletes conjunctive action to conjMatchFlowContext actions. // A clause is hit if the packet matches any conjunctive match flow that are grouped by this clause. Conjunction // action flow is hit only if all clauses in the policyRuleConjunction are hit. // 3) Default drop flows are also maintained by conjMatchFlowContext. It is used to drop packets sent from or to the // AppliedToGroup but not pass the Network Policy rule. type policyRuleConjunction struct { id uint32 fromClause *clause toClause *clause serviceClause *clause actionFlows []binding.Flow // NetworkPolicy name and Namespace information for debugging usage. npName string npNamespace string } // clause groups conjunctive match flows. Matches in a clause represent source addresses(for fromClause), or destination // addresses(for toClause) or service ports(for serviceClause) in a NetworkPolicy rule. When the new address or service // port is added into the clause, it adds a new conjMatchFlowContext into globalConjMatchFlowCache (or finds the // existing one from globalConjMatchFlowCache), and then update the key of the conjunctiveMatch into its own matches. // When address is deleted from the clause, it deletes the conjunctive action from the conjMatchFlowContext, // and then deletes the key of conjunctiveMatch from its own matches. type clause struct { action *conjunctiveAction // matches is a map from the unique string generated from the conjunctiveMatch to conjMatchFlowContext. It is used // to cache conjunctive match conditions in the same clause. 
matches map[string]*conjMatchFlowContext // ruleTable is where to install conjunctive match flows. ruleTable binding.Table // dropTable is where to install Openflow entries to drop the packet sent to or from the AppliedToGroup but does not // satisfy any conjunctive match conditions. It should be nil, if the clause is used for matching service port. dropTable binding.Table } func (c *clause) addConjunctiveMatchFlow(client *client, match *conjunctiveMatch) *conjMatchFlowContextChange { matcherKey := match.generateGlobalMapKey() _, found := c.matches[matcherKey] if found { klog.V(2).Infof("Conjunctive match flow with matcher %s is already added in rule: %d", matcherKey, c.action.conjID) return nil } var context *conjMatchFlowContext ctxType := modification var dropFlow *flowChange // Get conjMatchFlowContext from globalConjMatchFlowCache. If it doesn't exist, create a new one and add into the cache. context, found = client.globalConjMatchFlowCache[matcherKey] if !found { context = &conjMatchFlowContext{ conjunctiveMatch: match, actions: make(map[uint32]*conjunctiveAction), client: client, } ctxType = insertion // Generate the default drop flow if dropTable is not nil and the default drop flow is not set yet. if c.dropTable != nil && context.dropFlow == nil { dropFlow = &flowChange{ flow: context.client.defaultDropFlow(c.dropTable.GetID(), match.matchKey, match.matchValue), changeType: insertion, } } } // Calculate the change on the conjMatchFlowContext. ctxChanges := &conjMatchFlowContextChange{ context: context, ctxChangeType: ctxType, clause: c, actChange: &actionChange{ changeType: insertion, }, dropFlow: dropFlow, } if c.action.nClause > 1 { // Append the conjunction to conjunctiveFlowContext's actions, and add the changed flow into the conjMatchFlowContextChange. 
flowChange := context.addAction(c.action, match.priority) if flowChange != nil { ctxChanges.matchFlow = flowChange ctxChanges.actChange.action = c.action } } else { // Set the flowChange type as "insertion" but do not set flowChange.Flow. In this case, the policyRuleConjunction should // be added into conjunctiveFlowContext's denyAllRules. ctxChanges.matchFlow = &flowChange{ changeType: insertion, } } return ctxChanges } func (c *clause) generateAddressConjMatch(addr types.Address, addrType types.AddressType, priority *uint16) *conjunctiveMatch { matchKey := addr.GetMatchKey(addrType) matchValue := addr.GetValue() match := &conjunctiveMatch{ tableID: c.ruleTable.GetID(), matchKey: matchKey, matchValue: matchValue, priority: priority, } return match } func getServiceMatchType(protocol *v1beta1.Protocol) int { switch *protocol { case v1beta1.ProtocolTCP: return MatchTCPDstPort case v1beta1.ProtocolUDP: return MatchUDPDstPort case v1beta1.ProtocolSCTP: return MatchSCTPDstPort default: return MatchTCPDstPort } } func (c *clause) generateServicePortConjMatch(port v1beta1.Service, priority *uint16) *conjunctiveMatch { matchKey := getServiceMatchType(port.Protocol) // Match all ports with the given protocol type if the matchValue is not specified (value is 0). matchValue := uint16(0) if port.Port != nil { matchValue = uint16(port.Port.IntVal) } match := &conjunctiveMatch{ tableID: c.ruleTable.GetID(), matchKey: matchKey, matchValue: matchValue, priority: priority, } return match } // addAddrFlows translates the specified addresses to conjunctiveMatchFlows, and returns the corresponding changes on the // conjunctiveMatchFlows. func (c *clause) addAddrFlows(client *client, addrType types.AddressType, addresses []types.Address, priority *uint16) []*conjMatchFlowContextChange { var conjMatchFlowContextChanges []*conjMatchFlowContextChange // Calculate Openflow changes for the added addresses. 
for _, addr := range addresses { match := c.generateAddressConjMatch(addr, addrType, priority) ctxChange := c.addConjunctiveMatchFlow(client, match) if ctxChange != nil { conjMatchFlowContextChanges = append(conjMatchFlowContextChanges, ctxChange) } } return conjMatchFlowContextChanges } // addServiceFlows translates the specified NetworkPolicyPorts to conjunctiveMatchFlow, and returns corresponding // conjMatchFlowContextChange. func (c *clause) addServiceFlows(client *client, ports []v1beta1.Service, priority *uint16) []*conjMatchFlowContextChange { var conjMatchFlowContextChanges []*conjMatchFlowContextChange for _, port := range ports { match := c.generateServicePortConjMatch(port, priority) ctxChange := c.addConjunctiveMatchFlow(client, match) conjMatchFlowContextChanges = append(conjMatchFlowContextChanges, ctxChange) } return conjMatchFlowContextChanges } // deleteConjunctiveMatchFlow deletes the specific conjunctiveAction from existing flow. func (c *clause) deleteConjunctiveMatchFlow(flowContextKey string) *conjMatchFlowContextChange { context, found := c.matches[flowContextKey] // Match is not located in clause cache. It happens if the conjMatchFlowContext is already deleted from clause local cache. if !found { return nil } ctxChange := &conjMatchFlowContextChange{ context: context, clause: c, ctxChangeType: modification, actChange: &actionChange{ changeType: deletion, }, } conjID := c.action.conjID expectedConjunctiveActions := len(context.actions) expectedDenyAllRules := len(context.denyAllRules) if c.action.nClause > 1 { // Delete the conjunctive action if it is in context actions. action, found := context.actions[conjID] if found { ctxChange.matchFlow = context.deleteAction(conjID, ctxChange.context.priority) ctxChange.actChange.action = action expectedConjunctiveActions-- } } else { // Delete the DENY-ALL rule if it is in context denyAllRules. 
ctxChange.matchFlow = &flowChange{ changeType: deletion, } expectedDenyAllRules-- } // Uninstall default drop flow if the deleted conjunctiveAction is the last action or the rule is the last one in // the denyAllRules. if expectedConjunctiveActions == 0 && expectedDenyAllRules == 0 { if context.dropFlow != nil { ctxChange.dropFlow = &flowChange{ flow: context.dropFlow, changeType: deletion, } } // Remove the context from global cache if the match condition is not used by either DENEY-ALL or the conjunctive // match flow. ctxChange.ctxChangeType = deletion } return ctxChange } // deleteAddrFlows deletes conjunctiveMatchFlow relevant to the specified addresses from local cache, // and uninstalls Openflow entry. func (c *clause) deleteAddrFlows(addrType types.AddressType, addresses []types.Address, priority *uint16) []*conjMatchFlowContextChange { var ctxChanges []*conjMatchFlowContextChange for _, addr := range addresses { match := c.generateAddressConjMatch(addr, addrType, priority) contextKey := match.generateGlobalMapKey() ctxChange := c.deleteConjunctiveMatchFlow(contextKey) if ctxChange != nil { ctxChanges = append(ctxChanges, ctxChange) } } return ctxChanges } // deleteAllMatches deletes all conjunctiveMatchFlow in the clause, and removes Openflow entry. deleteAllMatches // is always invoked when NetworkPolicy rule is deleted. func (c *clause) deleteAllMatches() []*conjMatchFlowContextChange { var ctxChanges []*conjMatchFlowContextChange for key := range c.matches { ctxChange := c.deleteConjunctiveMatchFlow(key) if ctxChange != nil { ctxChanges = append(ctxChanges, ctxChange) } } return ctxChanges } func (c *policyRuleConjunction) getAddressClause(addrType types.AddressType) *clause { switch addrType { case types.SrcAddress: return c.fromClause case types.DstAddress: return c.toClause default: klog.Errorf("no address clause use AddressType %d", addrType) return nil } } // InstallPolicyRuleFlows installs flows for a new NetworkPolicy rule. 
Rule should include all fields in the // NetworkPolicy rule. Each ingress/egress policy rule installs Openflow entries on two tables, one for ruleTable and // the other for dropTable. If a packet does not pass the ruleTable, it will be dropped by the dropTable. // NetworkPolicyController will make sure only one goroutine operates on a PolicyRule and addresses in the rule. // For a normal NetworkPolicy rule, these Openflow entries are installed: 1) 1 conjunction action flow; 2) multiple // conjunctive match flows, the flow number depends on addresses in rule.From and rule.To, or if // rule.FromExcepts/rule.ToExcepts are present, flow number is equal to diff of addresses between rule.From and // rule.FromExcepts, and diff addresses between rule.To and rule.ToExcepts, and in addition number includes service ports // in rule.Service; and 3) multiple default drop flows, the number is dependent on the addresses in rule.From for // an egress rule, and addresses in rule.To for an ingress rule. // For ALLOW-ALL rule, the Openflow entries installed on the switch are similar to a normal rule. The differences include, // 1) rule.Service is nil; and 2) rule.To has only one address "0.0.0.0/0" for egress rule, and rule.From is "0.0.0.0/0" // for ingress rule. // For DENY-ALL rule, only the default drop flow is installed for the addresses in rule.From for egress rule, or // addresses in rule.To for ingress rule. No conjunctive match flow or conjunction action except flows are installed. // A DENY-ALL rule is configured with rule.ID, rule.Direction, and either rule.From(egress rule) or rule.To(ingress rule). // Other fields in the rule should be nil. // If there is an error in any clause's addAddrFlows or addServiceFlows, the conjunction action flow will never be hit. // If the default drop flow is already installed before this error, all packets will be dropped by the default drop flow, // Otherwise all packets will be allowed. 
func (c *client) InstallPolicyRuleFlows(ruleID uint32, rule *types.PolicyRule, npName, npNamespace string) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() // Check if the policyRuleConjunction is added into cache or not. If yes, return nil. conj := c.getPolicyRuleConjunction(ruleID) if conj != nil { klog.V(2).Infof("PolicyRuleConjunction %d is already added in cache", ruleID) return nil } conj = &policyRuleConjunction{ id: ruleID, npName: npName, npNamespace: npNamespace} nClause, ruleTable, dropTable := conj.calculateClauses(rule, c) // Conjunction action flows are installed only if the number of clauses in the conjunction is > 1. It should be a rule // to drop all packets. If the number is 1, no conjunctive match flows or conjunction action flows are installed, // but the default drop flow is installed. if nClause > 1 { // Install action flows. var actionFlows []binding.Flow if rule.IsAntreaNetworkPolicyRule() && *rule.Action == secv1alpha1.RuleActionDrop { actionFlows = append(actionFlows, c.conjunctionActionDropFlow(ruleID, ruleTable.GetID(), rule.Priority)) } else { actionFlows = append(actionFlows, c.conjunctionActionFlow(ruleID, ruleTable.GetID(), dropTable.GetNext(), rule.Priority)) } if err := c.ofEntryOperations.AddAll(actionFlows); err != nil { return nil } // Add the action flows after the Openflow entries are installed on the OVS bridge successfully. conj.actionFlows = actionFlows } c.conjMatchFlowLock.Lock() defer c.conjMatchFlowLock.Unlock() // Calculate the conjMatchFlowContext changes. The changed Openflow entries are included in the conjMatchFlowContext change. ctxChanges := conj.calculateChangesForRuleCreation(c, rule) // Send the changed Openflow entries to the OVS bridge, and then update the conjMatchFlowContext as the expected status. if err := c.applyConjunctiveMatchFlows(ctxChanges); err != nil { return err } // Add the policyRuleConjunction into policyCache. 
c.policyCache.Add(conj) return nil } // applyConjunctiveMatchFlows installs OpenFlow entries on the OVS bridge, and then updates the conjMatchFlowContext. func (c *client) applyConjunctiveMatchFlows(flowChanges []*conjMatchFlowContextChange) error { // Send the OpenFlow entries to the OVS bridge. if err := c.sendConjunctiveMatchFlows(flowChanges); err != nil { return err } // Update conjunctiveMatchContext. for _, ctxChange := range flowChanges { ctxChange.updateContextStatus() } return nil } // sendConjunctiveMatchFlows sends all the changed OpenFlow entries to the OVS bridge in a single Bundle. func (c *client) sendConjunctiveMatchFlows(changes []*conjMatchFlowContextChange) error { var addFlows, modifyFlows, deleteFlows []binding.Flow var flowChanges []*flowChange for _, flowChange := range changes { if flowChange.matchFlow != nil { flowChanges = append(flowChanges, flowChange.matchFlow) } if flowChange.dropFlow != nil { flowChanges = append(flowChanges, flowChange.dropFlow) } } // Retrieve the OpenFlow entries from the flowChanges. 
// (tail of the enclosing function) Bucket each pending conjMatchFlowContext
// change by its change type, then commit all of them to OVS in one bundle.
	for _, fc := range flowChanges {
		switch fc.changeType {
		case insertion:
			addFlows = append(addFlows, fc.flow)
		case modification:
			modifyFlows = append(modifyFlows, fc.flow)
		case deletion:
			deleteFlows = append(deleteFlows, fc.flow)
		}
	}
	return c.bridge.AddFlowsInBundle(addFlows, modifyFlows, deleteFlows)
}

// ActionFlowPriorities returns the OF priorities of the actionFlows in the policyRuleConjunction
func (c *policyRuleConjunction) ActionFlowPriorities() []string {
	priorities := make([]string, 0, len(c.actionFlows))
	for _, flow := range c.actionFlows {
		priorityStr := strconv.Itoa(int(flow.FlowPriority()))
		priorities = append(priorities, priorityStr)
	}
	return priorities
}

// newClause creates one clause of this conjunction. clauseID is the 1-based
// position of the clause and nClause the total number of clauses; both feed
// the conjunctive action. dropTable may be nil when the clause needs no
// default-deny flows.
func (c *policyRuleConjunction) newClause(clauseID uint8, nClause uint8, ruleTable, dropTable binding.Table) *clause {
	return &clause{
		ruleTable: ruleTable,
		dropTable: dropTable,
		matches:   make(map[string]*conjMatchFlowContext, 0),
		action: &conjunctiveAction{
			conjID:   c.id,
			clauseID: clauseID,
			nClause:  nClause,
		},
	}
}

// calculateClauses configures the policyRuleConjunction's clauses according to the PolicyRule. The Openflow entries are
// not installed on the OVS bridge when calculating the clauses.
// Returns the number of clauses together with the rule table and drop table
// selected for the rule's direction.
func (c *policyRuleConjunction) calculateClauses(rule *types.PolicyRule, clnt *client) (uint8, binding.Table, binding.Table) {
	var ruleTable, dropTable binding.Table
	var isEgressRule = false
	// Pick rule/default tables by direction, and by whether this is an
	// Antrea-native NetworkPolicy rule (separate cnp* tables).
	switch rule.Direction {
	case v1beta1.DirectionOut:
		if rule.IsAntreaNetworkPolicyRule() {
			ruleTable = clnt.pipeline[cnpEgressRuleTable]
		} else {
			ruleTable = clnt.pipeline[EgressRuleTable]
		}
		dropTable = clnt.pipeline[EgressDefaultTable]
		isEgressRule = true
	default:
		if rule.IsAntreaNetworkPolicyRule() {
			ruleTable = clnt.pipeline[cnpIngressRuleTable]
		} else {
			ruleTable = clnt.pipeline[IngressRuleTable]
		}
		dropTable = clnt.pipeline[IngressDefaultTable]
	}

	var fromID, toID, serviceID, nClause uint8
	// Calculate clause IDs and the total number of clauses.
	if rule.From != nil {
		nClause++
		fromID = nClause
	}
	if rule.To != nil {
		nClause++
		toID = nClause
	}
	if rule.Service != nil {
		nClause++
		serviceID = nClause
	}

	var defaultTable binding.Table
	// Note: clauses are only created for the sections present on the rule, so
	// fromClause/toClause/serviceClause may remain nil.
	if rule.From != nil {
		// deny rule does not need to be created for ClusterNetworkPolicies
		if !isEgressRule || rule.IsAntreaNetworkPolicyRule() {
			defaultTable = nil
		} else {
			defaultTable = dropTable
		}
		c.fromClause = c.newClause(fromID, nClause, ruleTable, defaultTable)
	}
	if rule.To != nil {
		if isEgressRule || rule.IsAntreaNetworkPolicyRule() {
			defaultTable = nil
		} else {
			defaultTable = dropTable
		}
		c.toClause = c.newClause(toID, nClause, ruleTable, defaultTable)
	}
	if rule.Service != nil {
		c.serviceClause = c.newClause(serviceID, nClause, ruleTable, nil)
	}
	return nClause, ruleTable, dropTable
}

// calculateChangesForRuleCreation returns the conjMatchFlowContextChanges of the new policyRuleConjunction. It
// will calculate the expected conjMatchFlowContext status, and the changed Openflow entries.
func (c *policyRuleConjunction) calculateChangesForRuleCreation(clnt *client, rule *types.PolicyRule) []*conjMatchFlowContextChange {
	var ctxChanges []*conjMatchFlowContextChange
	if c.fromClause != nil {
		ctxChanges = append(ctxChanges, c.fromClause.addAddrFlows(clnt, types.SrcAddress, rule.From, rule.Priority)...)
	}
	if c.toClause != nil {
		ctxChanges = append(ctxChanges, c.toClause.addAddrFlows(clnt, types.DstAddress, rule.To, rule.Priority)...)
	}
	if c.serviceClause != nil {
		ctxChanges = append(ctxChanges, c.serviceClause.addServiceFlows(clnt, rule.Service, rule.Priority)...)
	}
	return ctxChanges
}

// calculateChangesForRuleDeletion returns the conjMatchFlowContextChanges of the deleted policyRuleConjunction. It
// will calculate the expected conjMatchFlowContext status, and the changed Openflow entries.
func (c *policyRuleConjunction) calculateChangesForRuleDeletion() []*conjMatchFlowContextChange {
	var ctxChanges []*conjMatchFlowContextChange
	if c.fromClause != nil {
		ctxChanges = append(ctxChanges, c.fromClause.deleteAllMatches()...)
	}
	if c.toClause != nil {
		ctxChanges = append(ctxChanges, c.toClause.deleteAllMatches()...)
	}
	if c.serviceClause != nil {
		ctxChanges = append(ctxChanges, c.serviceClause.deleteAllMatches()...)
	}
	return ctxChanges
}

// getAllFlowKeys returns the matching strings of actions flows of
// policyRuleConjunction, as well as matching flows of all its clauses.
func (c *policyRuleConjunction) getAllFlowKeys() []string {
	flowKeys := []string{}
	dropFlowKeys := []string{}
	for _, flow := range c.actionFlows {
		flowKeys = append(flowKeys, flow.MatchString())
	}

	addClauseFlowKeys := func(clause *clause) {
		if clause == nil {
			return
		}
		for _, ctx := range clause.matches {
			if ctx.flow != nil {
				flowKeys = append(flowKeys, ctx.flow.MatchString())
			}
			if ctx.dropFlow != nil {
				dropFlowKeys = append(dropFlowKeys, ctx.dropFlow.MatchString())
			}
		}
	}
	addClauseFlowKeys(c.fromClause)
	addClauseFlowKeys(c.toClause)
	addClauseFlowKeys(c.serviceClause)

	// Add flows in the order of action flows, conjunctive match flows, drop flows.
	return append(flowKeys, dropFlowKeys...)
}

// getPolicyRuleConjunction looks up the conjunction for ruleID in the policy
// cache, or returns nil when none is installed.
// NOTE(review): string(ruleID) converts the uint32 to a rune, not to a decimal
// string. This only works because the same conversion is presumably used by
// the cache's key function — verify against the policyCache key func.
func (c *client) getPolicyRuleConjunction(ruleID uint32) *policyRuleConjunction {
	conj, found, _ := c.policyCache.GetByKey(string(ruleID))
	if !found {
		return nil
	}
	return conj.(*policyRuleConjunction)
}

// GetPolicyFromConjunction returns the NetworkPolicy name/namespace that the
// rule identified by ruleID belongs to, or empty strings if unknown.
func (c *client) GetPolicyFromConjunction(ruleID uint32) (string, string) {
	conjunction := c.getPolicyRuleConjunction(ruleID)
	if conjunction == nil {
		return "", ""
	}
	return conjunction.npName, conjunction.npNamespace
}

// UninstallPolicyRuleFlows removes the Openflow entry relevant to the specified NetworkPolicy rule.
// It also returns a slice of stale ofPriorities used by ClusterNetworkPolicies.
// UninstallPolicyRuleFlows will do nothing if no Openflow entry for the rule is installed.
func (c *client) UninstallPolicyRuleFlows(ruleID uint32) ([]string, error) {
	c.replayMutex.RLock()
	defer c.replayMutex.RUnlock()

	conj := c.getPolicyRuleConjunction(ruleID)
	if conj == nil {
		klog.V(2).Infof("policyRuleConjunction with ID %d not found", ruleID)
		return nil, nil
	}
	ofPrioritiesToUninstallFlows := conj.ActionFlowPriorities()
	klog.V(2).Infof("Old priority %v found", ofPrioritiesToUninstallFlows)
	var staleOFPriorities []string
	// Delete action flows from the OVS bridge.
	if err := c.ofEntryOperations.DeleteAll(conj.actionFlows); err != nil {
		return nil, err
	}

	c.conjMatchFlowLock.Lock()
	defer c.conjMatchFlowLock.Unlock()
	// Get the conjMatchFlowContext changes.
	ctxChanges := conj.calculateChangesForRuleDeletion()
	// Send the changed OpenFlow entries to the OVS bridge and update the conjMatchFlowContext.
	if err := c.applyConjunctiveMatchFlows(ctxChanges); err != nil {
		return nil, err
	}
	// A priority becomes stale when no remaining conjunction is indexed at it.
	for _, p := range ofPrioritiesToUninstallFlows {
		conjsStalePriority, _ := c.policyCache.ByIndex(priorityIndex, p)
		if len(conjsStalePriority) == 0 {
			klog.V(2).Infof("ofPriority %v is now stale", p)
			staleOFPriorities = append(staleOFPriorities, p)
		}
	}
	c.policyCache.Delete(conj)
	return staleOFPriorities, nil
}

// replayPolicyFlows re-installs all cached policy flows (action flows, then
// conjunctive match and drop flows) after resetting them, e.g. after an OVS
// reconnection.
func (c *client) replayPolicyFlows() {
	var flows []binding.Flow
	addActionFlows := func(conj *policyRuleConjunction) {
		for _, flow := range conj.actionFlows {
			flow.Reset()
			flows = append(flows, flow)
		}
	}

	for _, conj := range c.policyCache.List() {
		addActionFlows(conj.(*policyRuleConjunction))
	}

	addMatchFlows := func(ctx *conjMatchFlowContext) {
		if ctx.dropFlow != nil {
			ctx.dropFlow.Reset()
			flows = append(flows, ctx.dropFlow)
		}
		if ctx.flow != nil {
			ctx.flow.Reset()
			flows = append(flows, ctx.flow)
		}
	}

	for _, ctx := range c.globalConjMatchFlowCache {
		addMatchFlows(ctx)
	}
	if err := c.ofEntryOperations.AddAll(flows); err != nil {
		klog.Errorf("Error when replaying flows: %v", err)
	}
}

// AddPolicyRuleAddress adds one or multiple addresses to the specified NetworkPolicy rule.
// If addrType is srcAddress, the addresses are added to PolicyRule.From, else to PolicyRule.To.
func (c *client) AddPolicyRuleAddress(ruleID uint32, addrType types.AddressType, addresses []types.Address, priority *uint16) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() conj := c.getPolicyRuleConjunction(ruleID) // If policyRuleConjunction doesn't exist in client's policyCache return not found error. It should not happen, since // NetworkPolicyController will guarantee the policyRuleConjunction is created before this method is called. The check // here is for safety. if conj == nil { return newConjunctionNotFound(ruleID) } var clause = conj.getAddressClause(addrType) // Check if the clause is nil or not. The clause is nil if the addrType is an unsupported type. if clause == nil { return fmt.Errorf("no clause is using addrType %d", addrType) } c.conjMatchFlowLock.Lock() defer c.conjMatchFlowLock.Unlock() flowChanges := clause.addAddrFlows(c, addrType, addresses, priority) return c.applyConjunctiveMatchFlows(flowChanges) } // DeletePolicyRuleAddress removes addresses from the specified NetworkPolicy rule. If addrType is srcAddress, the addresses // are removed from PolicyRule.From, else from PolicyRule.To. func (c *client) DeletePolicyRuleAddress(ruleID uint32, addrType types.AddressType, addresses []types.Address, priority *uint16) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() conj := c.getPolicyRuleConjunction(ruleID) // If policyRuleConjunction doesn't exist in client's policyCache return not found error. It should not happen, since // NetworkPolicyController will guarantee the policyRuleConjunction is created before this method is called. The check // here is for safety. if conj == nil { return newConjunctionNotFound(ruleID) } var clause = conj.getAddressClause(addrType) // Check if the clause is nil or not. The clause is nil if the addrType is an unsupported type. 
if clause == nil { return fmt.Errorf("no clause is using addrType %d", addrType) } c.conjMatchFlowLock.Lock() defer c.conjMatchFlowLock.Unlock() // Remove policyRuleConjunction to actions of conjunctive match using specific address. changes := clause.deleteAddrFlows(addrType, addresses, priority) // Update the Openflow entries on the OVS bridge, and update local cache. return c.applyConjunctiveMatchFlows(changes) } func (c *client) GetNetworkPolicyFlowKeys(npName, npNamespace string) []string { flowKeys := []string{} // Hold replayMutex write lock to protect flows from being modified by // NetworkPolicy updates and replayPolicyFlows. This is more for logic // cleanliness, as: for now flow updates do not impact the matching string // generation; NetworkPolicy updates do not change policyRuleConjunction.actionFlows; // and last for protection of clause flows, conjMatchFlowLock is good enough. c.replayMutex.Lock() defer c.replayMutex.Unlock() for _, conjObj := range c.policyCache.List() { conj := conjObj.(*policyRuleConjunction) if conj.npName == npName && conj.npNamespace == npNamespace { // There can be duplicated flows added due to conjunctive matches // shared by multiple policy rules (clauses). flowKeys = append(flowKeys, conj.getAllFlowKeys()...) } } return flowKeys } // flowUpdates stores updates to the actionFlows and matchFlows in a policyRuleConjunction. type flowUpdates struct { newActionFlows []binding.Flow newPriority uint16 } // getMatchFlowUpdates calculates the update for conjuctiveMatchFlows in a policyRuleConjunction to be // installed on a new priority. func (c *client) getMatchFlowUpdates(conj *policyRuleConjunction, newPriority uint16) (add, del []binding.Flow) { allClause := []*clause{conj.fromClause, conj.toClause, conj.serviceClause} for _, c := range allClause { for _, ctx := range c.matches { f := ctx.flow updatedFlow := f.ToBuilder(). MatchPriority(newPriority). 
Done() add = append(add, updatedFlow) del = append(del, f) } } return add, del } // processFlowUpdates identifies the update cases in flow adds and deletes. // For conjunctiveMatchFlow updates, the following scenario is possible: // A flow {priority=100,ip,reg1=0x1f action=conjunction(1,1/3)} need to be re-assigned priority=99. // In this case, an addFlow of <priority=99,ip,reg1=0x1f> and delFlow <priority=100,ip,reg1=0x1f> will be issued. // At the same time, another flow {priority=99,ip,reg1=0x1f action=conjunction(2,1/3)} exists and now needs to // be re-assigned priority 98. This operation will issue a delFlow <priority=99,ip,reg1=0x1f>, which // would essentially void the add flow for conj=1. // In this case, we remove the conflicting delFlow and set addFlow as a modifyFlow. func (c *client) processFlowUpdates(addFlows, delFlows []binding.Flow) (add, update, del []binding.Flow) { for _, a := range addFlows { matched := false for i := 0; i < len(delFlows); i++ { if a.FlowPriority() == delFlows[i].FlowPriority() && a.MatchString() == delFlows[i].MatchString() { matched = true // treat the addFlow as update update = append(update, a) // remove the delFlow from the list delFlows = append(delFlows[:i], delFlows[i+1:]...) // reset list index as delFlows[i] is removed i-- } } if !matched { add = append(add, a) } } del = delFlows return add, update, del } // updateConjunctionActionFlows constructs a new policyRuleConjunction with actionFlows updated to be // stored in the policyCache. 
func (c *client) updateConjunctionActionFlows(conj *policyRuleConjunction, updates flowUpdates) *policyRuleConjunction { newActionFlows := make([]binding.Flow, len(conj.actionFlows)) copy(newActionFlows, updates.newActionFlows) newConj := &policyRuleConjunction{ id: conj.id, fromClause: conj.fromClause, toClause: conj.toClause, serviceClause: conj.serviceClause, actionFlows: newActionFlows, npName: conj.npName, npNamespace: conj.npNamespace, } return newConj } // updateConjunctionMatchFlows updates the conjuctiveMatchFlows in a policyRuleConjunction. func (c *client) updateConjunctionMatchFlows(conj *policyRuleConjunction, newPriority uint16) { allClause := []*clause{conj.fromClause, conj.toClause, conj.serviceClause} for _, clause := range allClause { for i, ctx := range clause.matches { delete(c.globalConjMatchFlowCache, ctx.generateGlobalMapKey()) f := ctx.flow updatedFlow := f.ToBuilder(). MatchPriority(newPriority). Done() clause.matches[i].flow = updatedFlow clause.matches[i].priority = &newPriority } // update the globalConjMatchFlowCache so that the keys are updated for _, ctx := range clause.matches { c.globalConjMatchFlowCache[ctx.generateGlobalMapKey()] = ctx } } } // calculateFlowUpdates calculates the flow updates required for the priority re-assignments specified in the input map. 
func (c *client) calculateFlowUpdates(updates map[uint16]uint16) (addFlows, delFlows []binding.Flow, conjFlowUpdates map[uint32]flowUpdates) {
	conjFlowUpdates = map[uint32]flowUpdates{}
	for original, newPriority := range updates {
		originalPriorityStr := strconv.Itoa(int(original))
		conjs, _ := c.policyCache.ByIndex(priorityIndex, originalPriorityStr)
		klog.V(4).Infof("%d policyRuleConjunctions have flows installed at priority %v previously", len(conjs), originalPriorityStr)
		for _, conjObj := range conjs {
			conj := conjObj.(*policyRuleConjunction)
			for _, actionFlow := range conj.actionFlows {
				flowPriority := actionFlow.FlowPriority()
				if flowPriority == original {
					// The OF flow was created at the priority which need to be re-installed
					// at the NewPriority now
					updatedFlow := actionFlow.
						ToBuilder().
						MatchPriority(newPriority).
						Done()
					addFlows = append(addFlows, updatedFlow)
					delFlows = append(delFlows, actionFlow)
					// Store the actionFlow update to the policyRuleConjunction and update all
					// policyRuleConjunctions if flow installation is successful.
					conjFlowUpdates[conj.id] = flowUpdates{
						append(conjFlowUpdates[conj.id].newActionFlows, updatedFlow),
						newPriority,
					}
				}
			}
			// Match flows of the conjunction move to the new priority together
			// with the action flows.
			matchFlowAdd, matchFlowDel := c.getMatchFlowUpdates(conj, newPriority)
			addFlows = append(addFlows, matchFlowAdd...)
			delFlows = append(delFlows, matchFlowDel...)
		}
	}
	return addFlows, delFlows, conjFlowUpdates
}

// ReassignFlowPriorities takes a list of priority updates, and update the actionFlows to replace
// the old priority with the desired one, for each priority update.
func (c *client) ReassignFlowPriorities(updates map[uint16]uint16) error {
	addFlows, delFlows, conjFlowUpdates := c.calculateFlowUpdates(updates)
	add, update, del := c.processFlowUpdates(addFlows, delFlows)
	// Commit the flows updates calculated.
	err := c.bridge.AddFlowsInBundle(add, update, del)
	if err != nil {
		return err
	}
	// Only after the bundle succeeds are the cached conjunctions replaced
	// with copies carrying the new priority.
	// NOTE(review): string(conjID) converts the uint32 to a rune, not a
	// decimal string — presumably matching the cache's key function; verify.
	for conjID, actionUpdates := range conjFlowUpdates {
		originalConj, _, _ := c.policyCache.GetByKey(string(conjID))
		conj := originalConj.(*policyRuleConjunction)
		updatedConj := c.updateConjunctionActionFlows(conj, actionUpdates)
		c.updateConjunctionMatchFlows(updatedConj, actionUpdates.newPriority)
		c.policyCache.Update(updatedConj)
	}
	return nil
}
create_test.go
package bindings_test

import (
	"time"

	"github.com/containers/podman/v3/pkg/bindings/containers"
	"github.com/containers/podman/v3/pkg/specgen"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gexec"
)

// Integration specs for container creation via the podman bindings API: each
// spec starts a fresh API service and connection, and tears it down after.
var _ = Describe("Create containers ", func() {
	var (
		bt *bindingTest
		s  *gexec.Session
	)

	BeforeEach(func() {
		bt = newBindingTest()
		bt.RestoreImagesFromCache()
		s = bt.startAPIService()
		// Give the freshly started API service a moment before connecting.
		time.Sleep(1 * time.Second)
		err := bt.NewConnection()
		Expect(err).To(BeNil())
	})

	AfterEach(func() {
		s.Kill()
		bt.cleanup()
	})

	It("create a container running top", func() {
		// NOTE(review): this s is a *specgen.SpecGenerator that shadows the
		// outer *gexec.Session s; consider renaming for clarity.
		s := specgen.NewSpecGenerator(alpine.name, false)
		s.Command = []string{"top"}
		s.Terminal = true
		s.Name = "top"
		// Create, inspect, then start the container and verify it is running.
		ctr, err := containers.CreateWithSpec(bt.conn, s, nil)
		Expect(err).To(BeNil())
		data, err := containers.Inspect(bt.conn, ctr.ID, nil)
		Expect(err).To(BeNil())
		Expect(data.Name).To(Equal("top"))
		err = containers.Start(bt.conn, ctr.ID, nil)
		Expect(err).To(BeNil())
		data, err = containers.Inspect(bt.conn, ctr.ID, nil)
		Expect(err).To(BeNil())
		Expect(data.State.Status).To(Equal("running"))
	})
})
obj.js
"use strict";

Object.defineProperty(exports, "__esModule", {
  value: true
});

// Babel helper: a typeof replacement that also classifies Symbol correctly
// in pre-ES6 environments.
var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };

exports.isObjEmpty = isObjEmpty;
exports.cloneop = cloneop;
exports.isObject = isObject;

// Check whether an object is empty ({}) — compares the JSON serialization.
function isObjEmpty(obj) {
  return JSON.stringify(obj) == "{}";
}

// Object utilities.
// Compare two objects: returns true when they DIFFER. JSON-based, so the
// result is sensitive to key order.
var diffObj = exports.diffObj = function diffObj(obj1, obj2) {
  return JSON.stringify(obj1) !== JSON.stringify(obj2);
};

// Deep copy — works for plain objects, arrays and Dates; primitives are
// returned as-is.
function cloneop(obj) {
  var copy = void 0;

  // Handle the 3 simple types, and null or undefined
  if (null == obj || 'object' != (typeof obj === "undefined" ? "undefined" : _typeof(obj))) return obj;

  // Handle Date
  if (obj instanceof Date) {
    copy = new Date();
    copy.setTime(obj.getTime());
    return copy;
  }

  // Handle Array
  if (obj instanceof Array) {
    copy = [];
    for (var i = 0, len = obj.length; i < len; i++) {
      copy[i] = cloneop(obj[i]);
    }
    return copy;
  }

  // Handle Object
  if (obj instanceof Object) {
    copy = {};
    for (var attr in obj) {
      if (obj.hasOwnProperty(attr)) copy[attr] = cloneop(obj[attr]);
    }
    return copy;
  }

  throw new Error("Unable to copy obj! Its type isn't supported.");
}

// Check whether a value is a plain object.
function isObject(obj) {
  return Object.prototype.toString.call(obj) === '[object Object]';
}

// Remove the listed keys from an object (mutates and returns obj).
var deleteObjKey = exports.deleteObjKey = function deleteObjKey() {
  var arr = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];
  var obj = arguments[1];
  var _iteratorNormalCompletion = true;
  var _didIteratorError = false;
  var _iteratorError = undefined;

  try {
    for (var _iterator = arr[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) {
      var i = _step.value;

      delete obj[i];
    }
  } catch (err) {
    _didIteratorError = true;
    _iteratorError = err;
  } finally {
    try {
      if (!_iteratorNormalCompletion && _iterator.return) {
        _iterator.return();
      }
    } finally {
      if (_didIteratorError) {
        throw _iteratorError;
      }
    }
  }

  return obj;
};
data_type.rs
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Data types that connect Parquet physical types with their Rust-specific
//! representations.

use std::cmp::Ordering;
use std::fmt;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::str::from_utf8;

use byteorder::{BigEndian, ByteOrder};

use crate::basic::Type;
use crate::column::reader::{ColumnReader, ColumnReaderImpl};
use crate::column::writer::{ColumnWriter, ColumnWriterImpl};
use crate::errors::{ParquetError, Result};
use crate::util::{
    bit_util::{from_ne_slice, FromBytes},
    memory::{ByteBuffer, ByteBufferPtr},
};

/// Rust representation for logical type INT96, value is backed by an array of `u32`.
/// The type only takes 12 bytes, without extra padding.
#[derive(Clone, Debug, PartialOrd)]
pub struct Int96 {
    value: Option<[u32; 3]>,
}

impl Int96 {
    /// Creates new INT96 type struct with no data set.
    pub fn new() -> Self {
        Self { value: None }
    }

    /// Returns underlying data as slice of [`u32`].
    ///
    /// # Panics
    /// Panics if no data has been set (`set_data` was never called).
    #[inline]
    pub fn data(&self) -> &[u32] {
        self.value
            .as_ref()
            .expect("set_data should have been called")
    }

    /// Sets data for this INT96 type.
    #[inline]
    pub fn set_data(&mut self, elem0: u32, elem1: u32, elem2: u32) {
        self.value = Some([elem0, elem1, elem2]);
    }

    /// Converts this INT96 into an i64 representing the number of MILLISECONDS since Epoch
    pub fn to_i64(&self) -> i64 {
        const JULIAN_DAY_OF_EPOCH: i64 = 2_440_588;
        const SECONDS_PER_DAY: i64 = 86_400;
        const MILLIS_PER_SECOND: i64 = 1_000;

        // Layout (as used below): value[2] is the Julian day number;
        // value[0] and value[1] form a 64-bit nanosecond-of-day count
        // (value[1] is the high word).
        let day = self.data()[2] as i64;
        let nanoseconds = ((self.data()[1] as i64) << 32) + self.data()[0] as i64;
        let seconds = (day - JULIAN_DAY_OF_EPOCH) * SECONDS_PER_DAY;

        seconds * MILLIS_PER_SECOND + nanoseconds / 1_000_000
    }
}

impl Default for Int96 {
    fn default() -> Self {
        Self { value: None }
    }
}

impl PartialEq for Int96 {
    // Two values are equal when both are unset, or both hold the same triple.
    fn eq(&self, other: &Int96) -> bool {
        match (&self.value, &other.value) {
            (Some(v1), Some(v2)) => v1 == v2,
            (None, None) => true,
            _ => false,
        }
    }
}

impl From<Vec<u32>> for Int96 {
    /// # Panics
    /// Panics unless `buf` holds exactly 3 elements.
    fn from(buf: Vec<u32>) -> Self {
        assert_eq!(buf.len(), 3);
        let mut result = Self::new();
        result.set_data(buf[0], buf[1], buf[2]);
        result
    }
}

impl fmt::Display for Int96 {
    // NOTE(review): #[cold] presumably hints that Display formatting is off
    // the hot decode path — confirm against the crate's performance notes.
    // Panics if data is unset (delegates to `data()`).
    #[cold]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.data())
    }
}

/// Rust representation for BYTE_ARRAY and FIXED_LEN_BYTE_ARRAY Parquet physical types.
/// Value is backed by a byte buffer.
#[derive(Clone)]
pub struct ByteArray {
    data: Option<ByteBufferPtr>,
}

// Special case Debug that prints out byte arrays that are valid utf8 as &str's
impl std::fmt::Debug for ByteArray {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut debug_struct = f.debug_struct("ByteArray");
        match self.as_utf8() {
            Ok(s) => debug_struct.field("data", &s),
            Err(_) => debug_struct.field("data", &self.data),
        };
        debug_struct.finish()
    }
}

impl PartialOrd for ByteArray {
    // Orders by length first and only compares bytes when the lengths are
    // equal, so this is NOT plain lexicographic ordering. Arrays whose data
    // is unset are unordered (returns None).
    fn partial_cmp(&self, other: &ByteArray) -> Option<Ordering> {
        if self.data.is_some() && other.data.is_some() {
            match self.len().cmp(&other.len()) {
                Ordering::Greater => Some(Ordering::Greater),
                Ordering::Less => Some(Ordering::Less),
                Ordering::Equal => {
                    for (v1, v2) in self.data().iter().zip(other.data().iter()) {
                        match v1.cmp(v2) {
                            Ordering::Greater => return Some(Ordering::Greater),
                            Ordering::Less => return Some(Ordering::Less),
                            _ => {}
                        }
                    }
                    Some(Ordering::Equal)
                }
            }
        } else {
            None
        }
    }
}

impl ByteArray {
    /// Creates new byte array with no data set.
    #[inline]
    pub fn new() -> Self {
        ByteArray { data: None }
    }

    /// Gets length of the underlying byte buffer.
    ///
    /// # Panics
    /// Panics if data has not been set.
    #[inline]
    pub fn len(&self) -> usize {
        assert!(self.data.is_some());
        self.data.as_ref().unwrap().len()
    }

    /// Checks if the underlying buffer is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns slice of data.
    ///
    /// # Panics
    /// Panics if data has not been set.
    #[inline]
    pub fn data(&self) -> &[u8] {
        self.data
            .as_ref()
            .expect("set_data should have been called")
            .as_ref()
    }

    /// Set data from another byte buffer.
    #[inline]
    pub fn set_data(&mut self, data: ByteBufferPtr) {
        self.data = Some(data);
    }

    /// Returns `ByteArray` instance with slice of values for a data.
    ///
    /// # Panics
    /// Panics if data has not been set.
    #[inline]
    pub fn slice(&self, start: usize, len: usize) -> Self {
        Self::from(
            self.data
                .as_ref()
                .expect("set_data should have been called")
                .range(start, len),
        )
    }

    /// Interprets the underlying bytes as UTF-8; errors when data is unset or
    /// the bytes are not valid UTF-8.
    pub fn as_utf8(&self) -> Result<&str> {
        self.data
            .as_ref()
            .map(|ptr| ptr.as_ref())
            .ok_or_else(|| general_err!("Can't convert empty byte array to utf8"))
            .and_then(|bytes| from_utf8(bytes).map_err(|e| e.into()))
    }
}

impl From<Vec<u8>> for ByteArray {
    fn from(buf: Vec<u8>) -> ByteArray {
        Self {
            data: Some(ByteBufferPtr::new(buf)),
        }
    }
}

impl<'a> From<&'a str> for ByteArray {
    // Copies the string's bytes into a fresh buffer.
    fn from(s: &'a str) -> ByteArray {
        let mut v = Vec::new();
        v.extend_from_slice(s.as_bytes());
        Self {
            data: Some(ByteBufferPtr::new(v)),
        }
    }
}

impl From<ByteBufferPtr> for ByteArray {
    fn from(ptr: ByteBufferPtr) -> ByteArray {
        Self { data: Some(ptr) }
    }
}

impl From<ByteBuffer> for ByteArray {
    fn from(mut buf: ByteBuffer) -> ByteArray {
        Self {
            data: Some(buf.consume()),
        }
    }
}

impl Default for ByteArray {
    fn default() -> Self {
        ByteArray { data: None }
    }
}

impl PartialEq for ByteArray {
    // Equal when both are unset, or both buffers hold identical bytes.
    fn eq(&self, other: &ByteArray) -> bool {
        match (&self.data, &other.data) {
            (Some(d1), Some(d2)) => d1.as_ref() == d2.as_ref(),
            (None, None) => true,
            _ => false,
        }
    }
}

impl fmt::Display for ByteArray {
    // Panics if data is unset (delegates to `data()`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.data())
    }
}

/// Wrapper type for performance reasons, this represents `FIXED_LEN_BYTE_ARRAY` but in all other
/// considerations behaves the same as `ByteArray`
///
/// # Performance notes:
/// This type is a little unfortunate, without it the compiler generates code that takes quite a
/// big hit on the CPU pipeline. Essentially the previous version stalls awaiting the result of
/// `T::get_physical_type() == Type::FIXED_LEN_BYTE_ARRAY`.
///
/// It's debatable if this is wanted, it is out of spec for what parquet documents as its base
/// types, although there are code paths in the Rust (and potentially the C++) versions that
/// warrant this.
/// /// With this wrapper type the compiler generates more targetted code paths matching the higher /// level logical types, removing the data-hazard from all decoding and encoding paths. #[repr(transparent)] #[derive(Clone, Debug, Default)] pub struct FixedLenByteArray(ByteArray); impl PartialEq for FixedLenByteArray { fn eq(&self, other: &FixedLenByteArray) -> bool { self.0.eq(&other.0) } } impl PartialEq<ByteArray> for FixedLenByteArray { fn eq(&self, other: &ByteArray) -> bool { self.0.eq(other) } } impl PartialEq<FixedLenByteArray> for ByteArray { fn eq(&self, other: &FixedLenByteArray) -> bool { self.eq(&other.0) } } impl fmt::Display for FixedLenByteArray { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl PartialOrd for FixedLenByteArray { fn partial_cmp(&self, other: &FixedLenByteArray) -> Option<Ordering> { self.0.partial_cmp(&other.0) } } impl PartialOrd<FixedLenByteArray> for ByteArray { fn partial_cmp(&self, other: &FixedLenByteArray) -> Option<Ordering> { self.partial_cmp(&other.0) } } impl PartialOrd<ByteArray> for FixedLenByteArray { fn partial_cmp(&self, other: &ByteArray) -> Option<Ordering> { self.0.partial_cmp(other) } } impl Deref for FixedLenByteArray { type Target = ByteArray; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for FixedLenByteArray { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<ByteArray> for FixedLenByteArray { fn from(other: ByteArray) -> Self { Self(other) } } impl From<FixedLenByteArray> for ByteArray { fn from(other: FixedLenByteArray) -> Self { other.0 } } /// Rust representation for Decimal values. /// /// This is not a representation of Parquet physical type, but rather a wrapper for /// DECIMAL logical type, and serves as container for raw parts of decimal values: /// unscaled value in bytes, precision and scale. #[derive(Clone, Debug)] pub enum Decimal { /// Decimal backed by `i32`. 
Int32 { value: [u8; 4], precision: i32, scale: i32, }, /// Decimal backed by `i64`. Int64 { value: [u8; 8], precision: i32, scale: i32, }, /// Decimal backed by byte array. Bytes { value: ByteArray, precision: i32, scale: i32, }, } impl Decimal { /// Creates new decimal value from `i32`. pub fn from_i32(value: i32, precision: i32, scale: i32) -> Self { let mut bytes = [0; 4]; BigEndian::write_i32(&mut bytes, value); Decimal::Int32 { value: bytes, precision, scale, } } /// Creates new decimal value from `i64`. pub fn from_i64(value: i64, precision: i32, scale: i32) -> Self { let mut bytes = [0; 8]; BigEndian::write_i64(&mut bytes, value); Decimal::Int64 { value: bytes, precision, scale, } } /// Creates new decimal value from `ByteArray`. pub fn from_bytes(value: ByteArray, precision: i32, scale: i32) -> Self { Decimal::Bytes { value, precision, scale, } } /// Returns bytes of unscaled value. pub fn data(&self) -> &[u8] { match *self { Decimal::Int32 { ref value, .. } => value, Decimal::Int64 { ref value, .. } => value, Decimal::Bytes { ref value, .. } => value.data(), } } /// Returns decimal precision. pub fn precision(&self) -> i32 { match *self { Decimal::Int32 { precision, .. } => precision, Decimal::Int64 { precision, .. } => precision, Decimal::Bytes { precision, .. } => precision, } } /// Returns decimal scale. pub fn scale(&self) -> i32 { match *self { Decimal::Int32 { scale, .. } => scale, Decimal::Int64 { scale, .. } => scale, Decimal::Bytes { scale, .. } => scale, } } } impl Default for Decimal { fn default() -> Self { Self::from_i32(0, 0, 0) } } impl PartialEq for Decimal { fn eq(&self, other: &Decimal) -> bool { self.precision() == other.precision() && self.scale() == other.scale() && self.data() == other.data() } } /// Converts an instance of data type to a slice of bytes as `u8`. pub trait AsBytes { /// Returns slice of bytes for this data type. fn as_bytes(&self) -> &[u8]; } /// Converts an slice of a data type to a slice of bytes. 
pub trait SliceAsBytes: Sized { /// Returns slice of bytes for a slice of this data type. fn slice_as_bytes(self_: &[Self]) -> &[u8]; /// Return the internal representation as a mutable slice /// /// # Safety /// If modified you are _required_ to ensure the internal representation /// is valid and correct for the actual raw data unsafe fn slice_as_bytes_mut(self_: &mut [Self]) -> &mut [u8]; } impl AsBytes for [u8] { fn as_bytes(&self) -> &[u8] { self } } macro_rules! gen_as_bytes { ($source_ty:ident) => { impl AsBytes for $source_ty { #[allow(clippy::size_of_in_element_count)] fn as_bytes(&self) -> &[u8] { unsafe { std::slice::from_raw_parts( self as *const $source_ty as *const u8, std::mem::size_of::<$source_ty>(), ) } } } impl SliceAsBytes for $source_ty { #[inline] #[allow(clippy::size_of_in_element_count)] fn slice_as_bytes(self_: &[Self]) -> &[u8] { unsafe { std::slice::from_raw_parts( self_.as_ptr() as *const u8, std::mem::size_of::<$source_ty>() * self_.len(), ) } } #[inline] #[allow(clippy::size_of_in_element_count)] unsafe fn slice_as_bytes_mut(self_: &mut [Self]) -> &mut [u8] { std::slice::from_raw_parts_mut( self_.as_mut_ptr() as *mut u8, std::mem::size_of::<$source_ty>() * self_.len(), ) } } }; } gen_as_bytes!(i8); gen_as_bytes!(i16); gen_as_bytes!(i32); gen_as_bytes!(i64); gen_as_bytes!(u8); gen_as_bytes!(u16); gen_as_bytes!(u32); gen_as_bytes!(u64); gen_as_bytes!(f32); gen_as_bytes!(f64); macro_rules! unimplemented_slice_as_bytes { ($ty: ty) => { impl SliceAsBytes for $ty { fn slice_as_bytes(_self: &[Self]) -> &[u8] { unimplemented!() } unsafe fn slice_as_bytes_mut(_self: &mut [Self]) -> &mut [u8] { unimplemented!() } } }; } // TODO - Can Int96 and bool be implemented in these terms? 
unimplemented_slice_as_bytes!(Int96); unimplemented_slice_as_bytes!(bool); unimplemented_slice_as_bytes!(ByteArray); unimplemented_slice_as_bytes!(FixedLenByteArray); impl AsBytes for bool { fn as_bytes(&self) -> &[u8] { unsafe { std::slice::from_raw_parts(self as *const bool as *const u8, 1) } } } impl AsBytes for Int96 { fn as_bytes(&self) -> &[u8] { unsafe { std::slice::from_raw_parts(self.data() as *const [u32] as *const u8, 12) } } } impl AsBytes for ByteArray { fn as_bytes(&self) -> &[u8] { self.data() } } impl AsBytes for FixedLenByteArray { fn as_bytes(&self) -> &[u8] { self.data() } } impl AsBytes for Decimal { fn as_bytes(&self) -> &[u8] { self.data() } } impl AsBytes for Vec<u8> { fn as_bytes(&self) -> &[u8] { self.as_slice() } } impl<'a> AsBytes for &'a str { fn as_bytes(&self) -> &[u8] { (self as &str).as_bytes() } } impl AsBytes for str { fn as_bytes(&self) -> &[u8] { (self as &str).as_bytes() } } pub(crate) mod private { use crate::encodings::decoding::PlainDecoderDetails; use crate::util::bit_util::{BitReader, BitWriter}; use crate::util::memory::ByteBufferPtr; use byteorder::ByteOrder; use std::convert::TryInto; use super::{ParquetError, Result, SliceAsBytes}; pub type BitIndex = u64; /// Sealed trait to start to remove specialisation from implementations /// /// This is done to force the associated value type to be unimplementable outside of this /// crate, and thus hint to the type system (and end user) traits are public for the contract /// and not for extension. 
pub trait ParquetValueType: std::cmp::PartialEq + std::fmt::Debug + std::fmt::Display + std::default::Default + std::clone::Clone + super::AsBytes + super::FromBytes + super::SliceAsBytes + PartialOrd { /// Encode the value directly from a higher level encoder fn encode<W: std::io::Write>( values: &[Self], writer: &mut W, bit_writer: &mut BitWriter, ) -> Result<()>; /// Establish the data that will be decoded in a buffer fn set_data( decoder: &mut PlainDecoderDetails, data: ByteBufferPtr, num_values: usize, ); /// Decode the value from a given buffer for a higher level decoder fn decode( buffer: &mut [Self], decoder: &mut PlainDecoderDetails, ) -> Result<usize>; /// Return the encoded size for a type fn dict_encoding_size(&self) -> (usize, usize) { (std::mem::size_of::<Self>(), 1) } /// Return the value as i64 if possible /// /// This is essentially the same as `std::convert::TryInto<i64>` but can /// implemented for `f32` and `f64`, types that would fail orphan rules fn as_i64(&self) -> Result<i64> { Err(general_err!("Type cannot be converted to i64")) } /// Return the value as u64 if possible /// /// This is essentially the same as `std::convert::TryInto<u64>` but can /// implemented for `f32` and `f64`, types that would fail orphan rules fn as_u64(&self) -> Result<u64> { self.as_i64() .map_err(|_| general_err!("Type cannot be converted to u64")) .map(|x| x as u64) } /// Return the value as an Any to allow for downcasts without transmutation fn as_any(&self) -> &dyn std::any::Any; /// Return the value as an mutable Any to allow for downcasts without transmutation fn as_mut_any(&mut self) -> &mut dyn std::any::Any; } impl ParquetValueType for bool { #[inline] fn encode<W: std::io::Write>( values: &[Self], _: &mut W, bit_writer: &mut BitWriter, ) -> Result<()> { if bit_writer.bytes_written() + values.len() / 8 >= bit_writer.capacity() { bit_writer.extend(256); } for value in values { if !bit_writer.put_value(*value as u64, 1) { return Err(ParquetError::EOF( "unable 
to put boolean value".to_string(), )); } } Ok(()) } #[inline] fn set_data( decoder: &mut PlainDecoderDetails, data: ByteBufferPtr, num_values: usize, ) { decoder.bit_reader.replace(BitReader::new(data)); decoder.num_values = num_values; } #[inline] fn decode( buffer: &mut [Self], decoder: &mut PlainDecoderDetails, ) -> Result<usize> { let bit_reader = decoder.bit_reader.as_mut().unwrap(); let num_values = std::cmp::min(buffer.len(), decoder.num_values); let values_read = bit_reader.get_batch(&mut buffer[..num_values], 1); decoder.num_values -= values_read; Ok(values_read) } #[inline] fn as_i64(&self) -> Result<i64> { Ok(*self as i64) } #[inline] fn as_any(&self) -> &dyn std::any::Any { self } #[inline] fn as_mut_any(&mut self) -> &mut dyn std::any::Any { self } } /// Hopelessly unsafe function that emulates `num::as_ne_bytes` /// /// It is not recommended to use this outside of this private module as, while it /// _should_ work for primitive values, it is little better than a transmutation /// and can act as a backdoor into mis-interpreting types as arbitary byte slices #[inline] fn as_raw<'a, T>(value: *const T) -> &'a [u8] { unsafe { let value = value as *const u8; std::slice::from_raw_parts(value, std::mem::size_of::<T>()) } } macro_rules! 
impl_from_raw { ($ty: ty, $self: ident => $as_i64: block) => { impl ParquetValueType for $ty { #[inline] fn encode<W: std::io::Write>(values: &[Self], writer: &mut W, _: &mut BitWriter) -> Result<()> { let raw = unsafe { std::slice::from_raw_parts( values.as_ptr() as *const u8, std::mem::size_of::<$ty>() * values.len(), ) }; writer.write_all(raw)?; Ok(()) } #[inline] fn set_data(decoder: &mut PlainDecoderDetails, data: ByteBufferPtr, num_values: usize) { decoder.data.replace(data); decoder.start = 0; decoder.num_values = num_values; } #[inline] fn decode(buffer: &mut [Self], decoder: &mut PlainDecoderDetails) -> Result<usize> { let data = decoder.data.as_ref().expect("set_data should have been called"); let num_values = std::cmp::min(buffer.len(), decoder.num_values); let bytes_left = data.len() - decoder.start; let bytes_to_decode = std::mem::size_of::<Self>() * num_values; if bytes_left < bytes_to_decode { return Err(eof_err!("Not enough bytes to decode")); } // SAFETY: Raw types should be as per the standard rust bit-vectors unsafe { let raw_buffer = &mut Self::slice_as_bytes_mut(buffer)[..bytes_to_decode]; raw_buffer.copy_from_slice(data.range(decoder.start, bytes_to_decode).as_ref()); }; decoder.start += bytes_to_decode; decoder.num_values -= num_values; Ok(num_values) } #[inline] fn as_i64(&$self) -> Result<i64> { $as_i64 } #[inline] fn as_any(&self) -> &dyn std::any::Any { self } #[inline] fn as_mut_any(&mut self) -> &mut dyn std::any::Any { self } } } } impl_from_raw!(i32, self => { Ok(*self as i64) }); impl_from_raw!(i64, self => { Ok(*self) }); impl_from_raw!(f32, self => { Err(general_err!("Type cannot be converted to i64")) }); impl_from_raw!(f64, self => { Err(general_err!("Type cannot be converted to i64")) }); impl ParquetValueType for super::Int96 { #[inline] fn encode<W: std::io::Write>( values: &[Self], writer: &mut W, _: &mut BitWriter, ) -> Result<()> { for value in values { let raw = unsafe { std::slice::from_raw_parts( value.data() as *const 
[u32] as *const u8, 12, ) }; writer.write_all(raw)?; } Ok(()) } #[inline] fn set_data( decoder: &mut PlainDecoderDetails, data: ByteBufferPtr, num_values: usize, ) { decoder.data.replace(data); decoder.start = 0; decoder.num_values = num_values; } #[inline] fn decode( buffer: &mut [Self], decoder: &mut PlainDecoderDetails, ) -> Result<usize> { // TODO - Remove the duplication between this and the general slice method let data = decoder .data .as_ref() .expect("set_data should have been called"); let num_values = std::cmp::min(buffer.len(), decoder.num_values); let bytes_left = data.len() - decoder.start; let bytes_to_decode = 12 * num_values; if bytes_left < bytes_to_decode { return Err(eof_err!("Not enough bytes to decode")); } let data_range = data.range(decoder.start, bytes_to_decode); let bytes: &[u8] = data_range.data(); decoder.start += bytes_to_decode; let mut pos = 0; // position in byte array for i in 0..num_values { let elem0 = byteorder::LittleEndian::read_u32(&bytes[pos..pos + 4]); let elem1 = byteorder::LittleEndian::read_u32(&bytes[pos + 4..pos + 8]); let elem2 = byteorder::LittleEndian::read_u32(&bytes[pos + 8..pos + 12]); buffer[i] .as_mut_any() .downcast_mut::<Self>() .unwrap() .set_data(elem0, elem1, elem2); pos += 12; } decoder.num_values -= num_values; Ok(num_values) } #[inline] fn as_any(&self) -> &dyn std::any::Any { self } #[inline] fn as_mut_any(&mut self) -> &mut dyn std::any::Any { self } } // TODO - Why does macro importing fail? /// Reads `$size` of bytes from `$src`, and reinterprets them as type `$ty`, in /// little-endian order. `$ty` must implement the `Default` trait. Otherwise this won't /// compile. /// This is copied and modified from byteorder crate. macro_rules! 
read_num_bytes { ($ty:ty, $size:expr, $src:expr) => {{ assert!($size <= $src.len()); let mut buffer = <$ty as $crate::util::bit_util::FromBytes>::Buffer::default(); buffer.as_mut()[..$size].copy_from_slice(&$src[..$size]); <$ty>::from_ne_bytes(buffer) }}; } impl ParquetValueType for super::ByteArray { #[inline] fn encode<W: std::io::Write>( values: &[Self], writer: &mut W, _: &mut BitWriter, ) -> Result<()> { for value in values { let len: u32 = value.len().try_into().unwrap(); writer.write_all(&len.to_ne_bytes())?; let raw = value.data(); writer.write_all(raw)?; } Ok(()) } #[inline] fn set_data( decoder: &mut PlainDecoderDetails, data: ByteBufferPtr, num_values: usize, ) { decoder.data.replace(data); decoder.start = 0; decoder.num_values = num_values; } #[inline] fn decode( buffer: &mut [Self], decoder: &mut PlainDecoderDetails, ) -> Result<usize> { let data = decoder .data .as_mut() .expect("set_data should have been called"); let num_values = std::cmp::min(buffer.len(), decoder.num_values); for i in 0..num_values { let len: usize = read_num_bytes!(u32, 4, data.start_from(decoder.start).as_ref()) as usize; decoder.start += std::mem::size_of::<u32>(); if data.len() < decoder.start + len { return Err(eof_err!("Not enough bytes to decode")); } let val: &mut Self = buffer[i].as_mut_any().downcast_mut().unwrap(); val.set_data(data.range(decoder.start, len)); decoder.start += len; } decoder.num_values -= num_values; Ok(num_values) } #[inline] fn dict_encoding_size(&self) -> (usize, usize) { (std::mem::size_of::<u32>(), self.len()) } #[inline] fn as_any(&self) -> &dyn std::any::Any { self } #[inline] fn as_mut_any(&mut self) -> &mut dyn std::any::Any { self } } impl ParquetValueType for super::FixedLenByteArray { #[inline] fn encode<W: std::io::Write>( values: &[Self], writer: &mut W, _: &mut BitWriter, ) -> Result<()> { for value in values { let raw = value.data(); writer.write_all(raw)?; } Ok(()) } #[inline] fn set_data( decoder: &mut PlainDecoderDetails, data: 
ByteBufferPtr, num_values: usize, ) { decoder.data.replace(data); decoder.start = 0; decoder.num_values = num_values; } #[inline] fn decode( buffer: &mut [Self], decoder: &mut PlainDecoderDetails, ) -> Result<usize> { assert!(decoder.type_length > 0); let data = decoder .data .as_mut() .expect("set_data should have been called"); let num_values = std::cmp::min(buffer.len(), decoder.num_values); for i in 0..num_values { let len = decoder.type_length as usize; if data.len() < decoder.start + len { return Err(eof_err!("Not enough bytes to decode")); } let val: &mut Self = buffer[i].as_mut_any().downcast_mut().unwrap(); val.set_data(data.range(decoder.start, len)); decoder.start += len; } decoder.num_values -= num_values; Ok(num_values) } #[inline] fn dict_encoding_size(&self) -> (usize, usize) { (std::mem::size_of::<u32>(), self.len()) } #[inline] fn as_any(&self) -> &dyn std::any::Any { self } #[inline] fn as_mut_any(&mut self) -> &mut dyn std::any::Any { self } } } /// Contains the Parquet physical type information as well as the Rust primitive type /// presentation. pub trait DataType: 'static { type T: private::ParquetValueType; /// Returns Parquet physical type. fn get_physical_type() -> Type; /// Returns size in bytes for Rust representation of the physical type. fn get_type_size() -> usize; fn get_column_reader(column_writer: ColumnReader) -> Option<ColumnReaderImpl<Self>> where Self: Sized; fn get_column_writer(column_writer: ColumnWriter) -> Option<ColumnWriterImpl<Self>> where Self: Sized; fn get_column_writer_ref( column_writer: &ColumnWriter, ) -> Option<&ColumnWriterImpl<Self>> where Self: Sized; fn get_column_writer_mut( column_writer: &mut ColumnWriter, ) -> Option<&mut ColumnWriterImpl<Self>> where Self: Sized; } // Workaround bug in specialization pub trait SliceAsBytesDataType: DataType where Self::T: SliceAsBytes, { } impl<T> SliceAsBytesDataType for T where T: DataType, <T as DataType>::T: SliceAsBytes, { } macro_rules! 
make_type { ($name:ident, $physical_ty:path, $reader_ident: ident, $writer_ident: ident, $native_ty:ty, $size:expr) => { #[derive(Clone)] pub struct $name {} impl DataType for $name { type T = $native_ty; fn get_physical_type() -> Type { $physical_ty } fn get_type_size() -> usize { $size } fn get_column_reader( column_writer: ColumnReader, ) -> Option<ColumnReaderImpl<Self>> { match column_writer { ColumnReader::$reader_ident(w) => Some(w), _ => None, } } fn get_column_writer( column_writer: ColumnWriter, ) -> Option<ColumnWriterImpl<Self>> { match column_writer { ColumnWriter::$writer_ident(w) => Some(w), _ => None, } } fn get_column_writer_ref( column_writer: &ColumnWriter, ) -> Option<&ColumnWriterImpl<Self>> { match column_writer { ColumnWriter::$writer_ident(w) => Some(w), _ => None, } } fn get_column_writer_mut( column_writer: &mut ColumnWriter, ) -> Option<&mut ColumnWriterImpl<Self>> { match column_writer { ColumnWriter::$writer_ident(w) => Some(w), _ => None, } } } }; } // Generate struct definitions for all physical types make_type!( BoolType, Type::BOOLEAN, BoolColumnReader, BoolColumnWriter, bool, 1 ); make_type!( Int32Type, Type::INT32, Int32ColumnReader, Int32ColumnWriter, i32, 4 ); make_type!( Int64Type, Type::INT64, Int64ColumnReader, Int64ColumnWriter, i64, 8 ); make_type!( Int96Type, Type::INT96, Int96ColumnReader, Int96ColumnWriter, Int96, mem::size_of::<Int96>() ); make_type!( FloatType, Type::FLOAT, FloatColumnReader, FloatColumnWriter, f32, 4 ); make_type!( DoubleType, Type::DOUBLE, DoubleColumnReader, DoubleColumnWriter, f64, 8 ); make_type!( ByteArrayType, Type::BYTE_ARRAY, ByteArrayColumnReader, ByteArrayColumnWriter, ByteArray, mem::size_of::<ByteArray>() ); make_type!( FixedLenByteArrayType, Type::FIXED_LEN_BYTE_ARRAY, FixedLenByteArrayColumnReader, FixedLenByteArrayColumnWriter, FixedLenByteArray, mem::size_of::<FixedLenByteArray>() ); impl FromBytes for Int96 { type Buffer = [u8; 12]; fn from_le_bytes(_bs: Self::Buffer) -> Self { 
unimplemented!() } fn from_be_bytes(_bs: Self::Buffer) -> Self { unimplemented!() } fn from_ne_bytes(bs: Self::Buffer) -> Self { let mut i = Int96::new(); i.set_data( from_ne_slice(&bs[0..4]), from_ne_slice(&bs[4..8]), from_ne_slice(&bs[8..12]), ); i } } // FIXME Needed to satisfy the constraint of many decoding functions but ByteArray does not // appear to actual be converted directly from bytes impl FromBytes for ByteArray { type Buffer = [u8; 8]; fn from_le_bytes(_bs: Self::Buffer) -> Self { unreachable!() } fn from_be_bytes(_bs: Self::Buffer) -> Self { unreachable!() } fn from_ne_bytes(bs: Self::Buffer) -> Self { ByteArray::from(bs.to_vec()) } } impl FromBytes for FixedLenByteArray { type Buffer = [u8; 8]; fn from_le_bytes(_bs: Self::Buffer) -> Self { unreachable!() } fn from_be_bytes(_bs: Self::Buffer) -> Self { unreachable!() } fn
(bs: Self::Buffer) -> Self { Self(ByteArray::from(bs.to_vec())) } } /// Macro to reduce repetition in making type assertions on the physical type against `T` macro_rules! ensure_phys_ty { ($($ty: pat)|+ , $err: literal) => { match T::get_physical_type() { $($ty => (),)* _ => panic!($err), }; } } #[cfg(test)] #[allow(clippy::float_cmp, clippy::approx_constant)] mod tests { use super::*; #[test] #[allow(clippy::string_lit_as_bytes)] fn test_as_bytes() { assert_eq!(false.as_bytes(), &[0]); assert_eq!(true.as_bytes(), &[1]); assert_eq!(7_i32.as_bytes(), &[7, 0, 0, 0]); assert_eq!(555_i32.as_bytes(), &[43, 2, 0, 0]); assert_eq!(555_u32.as_bytes(), &[43, 2, 0, 0]); assert_eq!(i32::max_value().as_bytes(), &[255, 255, 255, 127]); assert_eq!(i32::min_value().as_bytes(), &[0, 0, 0, 128]); assert_eq!(7_i64.as_bytes(), &[7, 0, 0, 0, 0, 0, 0, 0]); assert_eq!(555_i64.as_bytes(), &[43, 2, 0, 0, 0, 0, 0, 0]); assert_eq!( (i64::max_value()).as_bytes(), &[255, 255, 255, 255, 255, 255, 255, 127] ); assert_eq!((i64::min_value()).as_bytes(), &[0, 0, 0, 0, 0, 0, 0, 128]); assert_eq!(3.14_f32.as_bytes(), &[195, 245, 72, 64]); assert_eq!(3.14_f64.as_bytes(), &[31, 133, 235, 81, 184, 30, 9, 64]); assert_eq!("hello".as_bytes(), &[b'h', b'e', b'l', b'l', b'o']); assert_eq!( Vec::from("hello".as_bytes()).as_bytes(), &[b'h', b'e', b'l', b'l', b'o'] ); // Test Int96 let i96 = Int96::from(vec![1, 2, 3]); assert_eq!(i96.as_bytes(), &[1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0]); // Test ByteArray let ba = ByteArray::from(vec![1, 2, 3]); assert_eq!(ba.as_bytes(), &[1, 2, 3]); // Test Decimal let decimal = Decimal::from_i32(123, 5, 2); assert_eq!(decimal.as_bytes(), &[0, 0, 0, 123]); let decimal = Decimal::from_i64(123, 5, 2); assert_eq!(decimal.as_bytes(), &[0, 0, 0, 0, 0, 0, 0, 123]); let decimal = Decimal::from_bytes(ByteArray::from(vec![1, 2, 3]), 5, 2); assert_eq!(decimal.as_bytes(), &[1, 2, 3]); } #[test] fn test_int96_from() { assert_eq!( Int96::from(vec![1, 12345, 1234567890]).data(), &[1, 12345, 
1234567890] ); } #[test] fn test_byte_array_from() { assert_eq!( ByteArray::from(vec![b'A', b'B', b'C']).data(), &[b'A', b'B', b'C'] ); assert_eq!(ByteArray::from("ABC").data(), &[b'A', b'B', b'C']); assert_eq!( ByteArray::from(ByteBufferPtr::new(vec![1u8, 2u8, 3u8, 4u8, 5u8])).data(), &[1u8, 2u8, 3u8, 4u8, 5u8] ); let mut buf = ByteBuffer::new(); buf.set_data(vec![6u8, 7u8, 8u8, 9u8, 10u8]); assert_eq!(ByteArray::from(buf).data(), &[6u8, 7u8, 8u8, 9u8, 10u8]); } #[test] fn test_decimal_partial_eq() { assert_eq!(Decimal::default(), Decimal::from_i32(0, 0, 0)); assert_eq!(Decimal::from_i32(222, 5, 2), Decimal::from_i32(222, 5, 2)); assert_eq!( Decimal::from_bytes(ByteArray::from(vec![0, 0, 0, 3]), 5, 2), Decimal::from_i32(3, 5, 2) ); assert!(Decimal::from_i32(222, 5, 2) != Decimal::from_i32(111, 5, 2)); assert!(Decimal::from_i32(222, 5, 2) != Decimal::from_i32(222, 6, 2)); assert!(Decimal::from_i32(222, 5, 2) != Decimal::from_i32(222, 5, 3)); assert!(Decimal::from_i64(222, 5, 2) != Decimal::from_i32(222, 5, 2)); } #[test] fn test_byte_array_ord() { let ba1 = ByteArray::from(vec![1, 2, 3]); let ba11 = ByteArray::from(vec![1, 2, 3]); let ba2 = ByteArray::from(vec![3, 4]); let ba3 = ByteArray::from(vec![1, 2, 4]); let ba4 = ByteArray::from(vec![]); let ba5 = ByteArray::from(vec![2, 2, 3]); assert!(ba1 > ba2); assert!(ba3 > ba1); assert!(ba1 > ba4); assert_eq!(ba1, ba11); assert!(ba5 > ba1); } }
from_ne_bytes
lp.py
import numpy as np from gcn.graphconv import ap_approximate def Model17(adj, alpha, y_train, y_test):
k = int(np.ceil(4 * alpha)) prediction, time = ap_approximate(adj, y_train, alpha, k) predicted_labels = np.argmax(prediction, axis=1) prediction = np.zeros(prediction.shape) prediction[np.arange(prediction.shape[0]), predicted_labels] = 1 test_acc = np.sum(prediction * y_test) / np.sum(y_test) test_acc_of_class = np.sum(prediction * y_test, axis=0) / np.sum(y_test, axis=0) return test_acc, test_acc_of_class
util.go
package server import( "log" "net/http" ) // Memcmp returns true if the first n bytes of two slices are // equal and false otherwise, where n is Min(len(a), len(b)). func Memcmp(a, b []byte) bool { var n int if len(a) < len(b) { n = len(a) } else { n = len(b) } for i := 0; i < n; i++ { if a[i] != b[i] { return false } } return true } // Assert checks if condition is false, and exits the program if it is. // Assert logs message to the standard logger before exiting if the // assertion fails. func Assert(condition bool, message interface{}) { if !condition { log.Fatal(message) } } // Assertf is equivalent to Assert, but with a format string func Assertf(condition bool, format string, args ...interface{})
// ReadBody reads the body of an http.Request into a []byte func ReadBody(req *http.Request) (body []byte, err error) { body = make([]byte, req.ContentLength) read, err := req.Body.Read(body); if int64(read) == req.ContentLength { err = nil } return body, err }
{ if !condition { log.Fatalf(format, args) } }
multi_root_tree_test.go
package tree_test import ( "testing" "github.com/stretchr/testify/assert" "github.com/odpf/optimus/core/tree" "github.com/odpf/optimus/models" ) func TestMultiRootDagTree(t *testing.T) { t.Run("GetNameAndDependents", func(t *testing.T) { treeNode1 := tree.NewTreeNode(models.JobSpec{ Name: "job1", }) treeNode2 := tree.NewTreeNode(models.JobSpec{ Name: "job2",
multiRootTree.AddNodeIfNotExist(treeNode1) multiRootTree.AddNodeIfNotExist(treeNode2) err := multiRootTree.IsCyclic() assert.NotNil(t, err) assert.Contains(t, err.Error(), tree.ErrCyclicDependencyEncountered.Error()) }) t.Run("MarkRoot", func(t *testing.T) { treeNode1 := tree.NewTreeNode(models.JobSpec{ Name: "job1", }) multiRootTree := tree.NewMultiRootTree() multiRootTree.AddNode(treeNode1) multiRootTree.MarkRoot(treeNode1) rootNodes := multiRootTree.GetRootNodes() assert.Equal(t, 1, len(rootNodes)) assert.Equal(t, "job1", rootNodes[0].Data.GetName()) }) t.Run("IsCyclic", func(t *testing.T) { t.Run("should throw an error if cyclic", func(t *testing.T) { treeNode1 := tree.NewTreeNode(models.JobSpec{ Name: "job1", }) treeNode2 := tree.NewTreeNode(models.JobSpec{ Name: "job2", }) multiRootTree := tree.NewMultiRootTree() multiRootTree.AddNode(treeNode1) multiRootTree.AddNode(treeNode2) treeNode1.AddDependent(treeNode2) treeNode2.AddDependent(treeNode1) err := multiRootTree.IsCyclic() assert.NotNil(t, err) assert.Contains(t, err.Error(), "cycle dependency") }) t.Run("should not return error if not cyclic", func(t *testing.T) { treeNode1 := tree.NewTreeNode(models.JobSpec{ Name: "job1", }) treeNode2 := tree.NewTreeNode(models.JobSpec{ Name: "job2", }) multiRootTree := tree.NewMultiRootTree() multiRootTree.AddNode(treeNode1) multiRootTree.AddNode(treeNode2) treeNode1.AddDependent(treeNode2) err := multiRootTree.IsCyclic() assert.Nil(t, err) }) }) }
}) multiRootTree := tree.NewMultiRootTree() treeNode1.AddDependent(treeNode2) treeNode2.AddDependent(treeNode1)
system.rs
//! Modular & extendable Service interface use actix_web::{http::Method, App, AsyncResponder, Error, Path, Result}; use serde_json::Value as JsonValue; use crate::api; use crate::api::*; use crate::service::Service; /// Service contoh, kamu bisa mencontoh bagaimana caranya membuat service /// dengan melihat kode [SystemService] ini. pub struct
; impl SystemService { #[doc(hidden)] pub fn new() -> Box<Self> { Box::new(Self {}) } } impl Service for SystemService { fn name(&self) -> &'static str { "system" } fn wire_api(&self, builder: &mut ServiceApiBuilder) { builder.public_scope().link(PublicApi::wire); } } #[derive(Deserialize)] pub struct CheckVersion { pub version: String, pub platform: String, } /// Contoh API public untuk service contoh [[SystemService]]. struct PublicApi {} #[api_group("System", "public", base = "/system/v1")] impl PublicApi { /// Get build information. #[api_endpoint(path = "/info", auth = "optional")] pub fn info(state: &AppState, query: ()) -> JsonValue { Ok(json!({ "version": env!("CARGO_PKG_VERSION"), "build": env!("BUILD_INFO"), "git": env!("GIT_REV") })) } /// check for new version #[api_endpoint(path = "/check_version", auth = "optional")] pub fn check_version(state: &AppState, query: CheckVersion) -> JsonValue { // @TODO(Rboin): code here Ok(json!({ "code": 0, "description": "", "result": { "new_update": "", "notes": "" } })) } }
SystemService
delete_test.go
package cli import ( "testing" // external "github.com/stretchr/testify/assert" ) func TestDeleteCmdHasUse(t *testing.T) { assert.NotEmpty(t, DeleteCmd.Use) } func
(t *testing.T) { assert.NotEmpty(t, DeleteCmd.Short) } func TestDeleteCmdHasLong(t *testing.T) { assert.NotEmpty(t, DeleteCmd.Long) } func TestDeleteCmdHasRun(t *testing.T) { assert.NotEmpty(t, DeleteCmd.Run) }
TestDeleteCmdHasShort
Intern.js
// TODO: Write code to define and export the Intern class. HINT: This class should inherit from Employee. const Employee = require("./Employee"); class Intern extends Employee { constructor(name, id, email, school) { super(name, id, email); this.school = school; }; getRole() { return "Intern"; };
getSchool() { return this.school; }; }; module.exports = Intern;
classification.py
""" __author__: Abhishek Thakur """ import torch import numpy as np from PIL import Image from PIL import ImageFile try: import torch_xla.core.xla_model as xm _xla_available = True except ImportError: _xla_available = False ImageFile.LOAD_TRUNCATED_IMAGES = True class ClassificationDataset: def __init__(self, image_paths, targets, resize, augmentations=None): """ :param image_paths: list of paths to images :param targets: numpy array :param resize: tuple or None :param augmentations: albumentations augmentations """ self.image_paths = image_paths self.targets = targets self.resize = resize self.augmentations = augmentations def
(self): return len(self.image_paths) def __getitem__(self, item): image = Image.open(self.image_paths[item]) targets = self.targets[item] if self.resize is not None: image = image.resize( (self.resize[1], self.resize[0]), resample=Image.BILINEAR ) image = np.array(image) if self.augmentations is not None: augmented = self.augmentations(image=image) image = augmented["image"] image = np.transpose(image, (2, 0, 1)).astype(np.float32) return { "image": torch.tensor(image), "targets": torch.tensor(targets), } class ClassificationDataLoader: def __init__(self, image_paths, targets, resize, augmentations=None): """ :param image_paths: list of paths to images :param targets: numpy array :param resize: tuple or None :param augmentations: albumentations augmentations """ self.image_paths = image_paths self.targets = targets self.resize = resize self.augmentations = augmentations self.dataset = ClassificationDataset( image_paths=self.image_paths, targets=self.targets, resize=self.resize, augmentations=self.augmentations, ) def fetch(self, batch_size, num_workers, drop_last=False, shuffle=True, tpu=False): """ :param batch_size: batch size :param num_workers: number of processes to use :param drop_last: drop the last batch? :param shuffle: True/False :param tpu: True/False, to use tpu or not """ sampler = None if tpu: sampler = torch.utils.data.distributed.DistributedSampler( self.dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal(), shuffle=shuffle, ) data_loader = torch.utils.data.DataLoader( self.dataset, batch_size=batch_size, sampler=sampler, drop_last=drop_last, num_workers=num_workers, ) return data_loader
__len__
article.go
package handler import ( "context" "fmt" "net/http" "github.com/cymonevo/monitor-backend/internal/log" "github.com/cymonevo/monitor-backend/internal/router" "github.com/cymonevo/monitor-backend/module/article/model" ) type articleHandlerImpl struct { router router.Router factory model.Factory } func
(router router.Router, factory model.Factory) *articleHandlerImpl { return &articleHandlerImpl{ router: router, factory: factory, } } func (h *articleHandlerImpl) Register() router.Router { h.router.SetPrefix("/article") h.router.HandleJSON("", http.MethodGet, h.index) h.router.HandleView("/view", http.MethodGet, h.view) //test endpoints h.router.HandleJSON("/get", http.MethodGet, h.get) h.router.HandleJSON("/post", http.MethodPost, h.post) return h.router } func (h *articleHandlerImpl) get(ctx context.Context, r *http.Request) (interface{}, error) { auth := r.Header.Get("Authorization") query := GetQueryParam(r, "data") return fmt.Sprint("GET", "\nAUTH: ", auth, "\nDATA: ", query), nil } func (h *articleHandlerImpl) post(ctx context.Context, r *http.Request) (interface{}, error) { auth := r.Header.Get("Authorization") var data interface{} err := ParseBody(r.Body, &data) if err != nil { log.ErrorDetail("Article", "error parse request body", err) return nil, err } return fmt.Sprint("POST", "\nAUTH: ", auth, "\nDATA: ", data), nil } func (h *articleHandlerImpl) index(ctx context.Context, r *http.Request) (interface{}, error) { return struct { Version string `json:"version"` Build string `json:"build_version"` }{ Version: "0.0.1", Build: "alpha", }, nil } func (h *articleHandlerImpl) view(ctx context.Context, r *http.Request) (router.RenderRequest, error) { type invoice struct { Invoice string OrderID string } return router.RenderRequest{ Template: "invoice.html", Data: invoice{ Invoice: "INV/2018/123", OrderID: "123", }, }, nil } func (h *articleHandlerImpl) health(ctx context.Context, r *http.Request) (interface{}, error) { return h.factory.NewHealthModel().Do(ctx) }
NewArticleHandler
get_domain_topic.py
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables __all__ = [ 'GetDomainTopicResult', 'AwaitableGetDomainTopicResult', 'get_domain_topic', ] warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventgrid:getDomainTopic'.""", DeprecationWarning) @pulumi.output_type class GetDomainTopicResult: """ Domain Topic. """ def __init__(__self__, id=None, name=None, provisioning_state=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if provisioning_state and not isinstance(provisioning_state, str): raise TypeError("Expected argument 'provisioning_state' to be a str") pulumi.set(__self__, "provisioning_state", provisioning_state) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str:
@property @pulumi.getter def name(self) -> str: """ Name of the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> Optional[str]: """ Provisioning state of the domain topic. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def type(self) -> str: """ Type of the resource. """ return pulumi.get(self, "type") class AwaitableGetDomainTopicResult(GetDomainTopicResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetDomainTopicResult( id=self.id, name=self.name, provisioning_state=self.provisioning_state, type=self.type) def get_domain_topic(domain_name: Optional[str] = None, domain_topic_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainTopicResult: """ Domain Topic. Latest API Version: 2020-06-01. :param str domain_name: Name of the domain. :param str domain_topic_name: Name of the topic. :param str resource_group_name: The name of the resource group within the user's subscription. """ pulumi.log.warn("get_domain_topic is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventgrid:getDomainTopic'.") __args__ = dict() __args__['domainName'] = domain_name __args__['domainTopicName'] = domain_topic_name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid/latest:getDomainTopic', __args__, opts=opts, typ=GetDomainTopicResult).value return AwaitableGetDomainTopicResult( id=__ret__.id, name=__ret__.name, provisioning_state=__ret__.provisioning_state, type=__ret__.type)
""" Fully qualified identifier of the resource. """ return pulumi.get(self, "id")
test_end2end.py
from convlab2.nlu.svm.multiwoz import SVMNLU from convlab2.nlu.jointBERT.multiwoz import BERTNLU from convlab2.nlu.milu.multiwoz import MILU from convlab2.dst.rule.multiwoz import RuleDST from convlab2.policy.rule.multiwoz import RulePolicy from convlab2.nlg.template.multiwoz import TemplateNLG from convlab2.dialog_agent import PipelineAgent, BiSession from convlab2.evaluator.multiwoz_eval import MultiWozEvaluator from pprint import pprint import random import numpy as np import torch sys_nlu = BERTNLU(mode='all', config_file='multiwoz_all.json', model_file='https://tatk-data.s3-ap-northeast-1.amazonaws.com/bert_multiwoz_all.zip') # sys_nlu = SVMNLU(mode='sys') # simple rule DST sys_dst = RuleDST() # rule policy sys_policy = RulePolicy(character='sys') # template NLG sys_nlg = TemplateNLG(is_user=False) # assemble sys_agent = PipelineAgent(sys_nlu, sys_dst, sys_policy, sys_nlg, 'sys') # user_nlu = sys_nlu # user_nlu = SVMNLU(mode='all') user_nlu = MILU(model_file="https://convlab.blob.core.windows.net/models/milu.tar.gz") # not use dst user_dst = None # rule policy user_policy = RulePolicy(character='usr') # template NLG user_nlg = TemplateNLG(is_user=True) # assemble user_agent = PipelineAgent(user_nlu, None, user_policy, user_nlg, 'user') evaluator = MultiWozEvaluator() sess = BiSession(sys_agent=sys_agent, user_agent=user_agent, kb_query=None, evaluator=evaluator) random.seed(20200131) np.random.seed(20190827) torch.manual_seed(20200131) sys_response = '' sess.init_session() print('init goal:') pprint(sess.evaluator.goal) print('-'*50) for i in range(40): sys_response, user_response, session_over, reward = sess.next_turn(sys_response) print('user:', user_response) print('sys:', sys_response) print() if session_over is True: print('task complete:', user_policy.policy.goal.task_complete()) print('task success:', sess.evaluator.task_success()) print('book rate:', sess.evaluator.book_rate()) print('inform precision/recall/f1:', sess.evaluator.inform_F1()) 
print('-'*50) print('final goal:') pprint(sess.evaluator.goal) print('='*100) break total_dialog = 10 random.seed(20200131) goal_seeds = [random.randint(1,100000) for _ in range(total_dialog)] precision = 0 recall = 0 f1 = 0 suc_num = 0 complete_num = 0 for j in range(total_dialog): sys_response = '' random.seed(goal_seeds[0]) np.random.seed(goal_seeds[0]) torch.manual_seed(goal_seeds[0]) goal_seeds.pop(0) sess.init_session() # print('init goal:') # pprint(sess.evaluator.goal) # print('-'*50) for i in range(40): sys_response, user_response, session_over, reward = sess.next_turn( sys_response) # print('user:', user_response) # print('sys:', sys_response) if session_over is True: if sess.evaluator.task_success() == 1: suc_num = suc_num+1 if user_policy.policy.goal.task_complete(): complete_num += 1 print('task complete:', user_policy.policy.goal.task_complete()) print('task success:', sess.evaluator.task_success()) print('book rate:', sess.evaluator.book_rate()) print('inform precision/recall/f1:', sess.evaluator.inform_F1()) stats = sess.evaluator.inform_F1() if(stats[0] != None): precision = precision+stats[0] if(stats[1] != None): recall = recall+stats[1] if(stats[2] != None): f1 = f1+stats[2] else:
# print('-'*50) # print('final goal:') # pprint(sess.evaluator.goal) # print('='*100) break print("complete number of dialogs/tot:", complete_num/total_dialog) print("success number of dialogs/tot:", suc_num/total_dialog) print("average precision:", precision/total_dialog) print("average recall:", recall/total_dialog) print("average f1:", f1/total_dialog)
suc_num = suc_num-1
storage.go
package elasticsearch import ( "bytes" "context" "encoding/json" "fmt" "strconv" "strings" "github.com/alerting/alerts/pkg/alerts" "github.com/alerting/alerts/pkg/cap" raven "github.com/getsentry/raven-go" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/ptypes" "github.com/olivere/elastic" opentracing "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" otlog "github.com/opentracing/opentracing-go/log" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) // Storage defines an Elasticsearch alerts storage. type Storage struct { Client *elastic.Client Index string } // NewStorage creates the storage. func NewStorage(url string, index string, opts ...elastic.ClientOptionFunc) (alerts.Storage, error) { // Generate ElasticSearch client. opts = append(opts, elastic.SetURL(url)) client, err := elastic.NewClient(opts...) if err != nil { return nil, err } // Configure the ElasticSearch index. log.WithField("index", index).Debug("Ensuring the index exists") exists, err := client.IndexExists(index).Do(context.Background()) if err != nil { log.WithError(err).Error("Failed to ensure index exists") raven.CaptureErrorAndWait(err, map[string]string{ "index": index, }) return nil, err } // Create the index, if it doesn't exist. if !exists { log.WithField("index", index).Info("Creating index") _, err := client.CreateIndex(index).BodyString(mapping).Do(context.Background()) if err != nil { log.WithError(err).Error("Failed to create index") raven.CaptureErrorAndWait(err, map[string]string{ "index": index, }) return nil, err } } // Return storage. return &Storage{ Client: client, Index: index, }, nil } func generateRangeQuery(ctx context.Context, field string, ts *alerts.TimeConditions) (*elastic.RangeQuery, error)
// Add adds the alert to ElasticSearch. func (s *Storage) Add(ctx context.Context, alert *cap.Alert) error { span, _ := opentracing.StartSpanFromContext(ctx, "Add") defer span.Finish() bulk := s.Client.Bulk().Index(s.Index).Type("doc") span.LogEventWithPayload("alert", fmt.Sprintf("%s %s,%s,%s", alert.Id, alert.Identifier, alert.Sender, alert.Sent)) // Conver the alert to a map // Convert to map[string]interface{} var alertMap map[string]interface{} b, _ := (&jsonpb.Marshaler{}).MarshalToString(alert) json.Unmarshal([]byte(b), &alertMap) // Remove the infos item, as we need to add those seperately delete(alertMap, "infos") // Setup the child-parent relationship alertMap["_object"] = map[string]string{ "name": "alert", } // Index the alert bulk.Add( elastic.NewBulkIndexRequest(). Id(alert.Id). Doc(alertMap)) // Index the infos for indx, info := range alert.Infos { // Some cleanup if info.Effective == nil { info.Effective = alert.Sent } var infoMap map[string]interface{} b, _ := (&jsonpb.Marshaler{}).MarshalToString(info) json.Unmarshal([]byte(b), &infoMap) // Setup Parent infoMap["_object"] = map[string]string{ "name": "info", "parent": alert.Id, } bulk.Add( elastic.NewBulkIndexRequest(). Id(fmt.Sprintf("%s:%d", alert.Id, indx)). Routing(alert.Id). Doc(infoMap)) } // Run bulk actions if bulk.NumberOfActions() > 0 { _, err := bulk.Do(ctx) if err != nil { log.WithError(err).Error("Failed to run bulk actions") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return err } } return nil } // Get returns the alert described by reference from ElasticSearch. func (s *Storage) Get(ctx context.Context, reference *cap.Reference) (*cap.Alert, error) { // Start a new span. span, sctx := opentracing.StartSpanFromContext(ctx, "Storage.ElasticSearch::Get") defer span.Finish() // Save information about the request. 
if reference.Id != "" { span.SetTag("reference.id", reference.Id) } else { span.SetTag("reference.identifier", reference.Identifier) span.SetTag("reference.sender", reference.Sender) span.SetTag("reference.sent", reference.Sent) } id := reference.Id if id == "" { id = reference.ID() } span.LogEvent("Fetching alert") log.WithFields(log.Fields{ "id": id, "index": s.Index, }).Debug("Fetching alert from ElasticSearch") item, err := s.Client.Get().Index(s.Index).Type("doc").Id(id).Do(sctx) log.Debug("Response received") if err != nil { log.WithError(err).Error("Unable to fetch alert from ElasticSearch") raven.CaptureError(err, map[string]string{ "id": id, "index": s.Index, }) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) if elastic.IsNotFound(err) { return nil, status.Error(codes.NotFound, "Alert was not found") } return nil, err } // If we were sent an invalid ID (ie. of an info block), but matched a document, then return error. if item.Routing != "" { err := fmt.Errorf("Invalid alert identifier: %s", id) log.WithError(err).Error("Unable to fetch alert from ElasticSearch") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return nil, err } // Start fetching the alert. span.LogEvent("Processing alert") var alert cap.Alert dec := &jsonpb.Unmarshaler{ AllowUnknownFields: true, } err = dec.Unmarshal(bytes.NewReader(*item.Source), &alert) if err != nil { log.WithError(err).Error("Unable to unmarshal alert") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return nil, err } // Generate the query to find infos. 
span.LogEvent("Fetching infos") log.Debug("Finding infos associated with the alert") q := elastic.NewParentIdQuery("info", id) search := s.Client.Search().Index(s.Index).Type("doc") search = search.Query(q) // Realistically, there shouldn't be more than ~4 (Canada NAAD) search = search.From(0).Size(20) search = search.Sort("_id", true) res, err := search.Do(sctx) log.Debug("Response received") if err != nil { log.WithError(err).Error("Failed to find infos") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return nil, err } span.LogEvent("Processing infos") if res.Hits != nil { if res.TotalHits() != int64(len(res.Hits.Hits)) { log.Warnf("Got %d of %d infos", len(res.Hits.Hits), res.TotalHits) raven.CaptureMessage("Did not load all infos", map[string]string{ "id": id, "total": strconv.FormatInt(res.TotalHits(), 10), "loaded": strconv.Itoa(len(res.Hits.Hits)), }) } log.Debugf("Got %d infos for alert", res.TotalHits()) for _, hit := range res.Hits.Hits { var info cap.Info err = dec.Unmarshal(bytes.NewReader(*hit.Source), &info) if err != nil { log.WithError(err).Error("Failed to unmarshal info") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return nil, err } alert.Infos = append(alert.Infos, &info) } } else { log.WithField("id", id).Warn("No infos found for alert") } return &alert, nil } // Has returns whether or not an alert exists for the given reference. func (s *Storage) Has(ctx context.Context, reference *cap.Reference) (bool, error) { // Start a new span. span, sctx := opentracing.StartSpanFromContext(ctx, "Storage.ElasticSearch::Has") defer span.Finish() // Save information about the request. 
if reference.Id != "" { span.SetTag("reference.id", reference.Id) } else { span.SetTag("reference.identifier", reference.Identifier) span.SetTag("reference.sender", reference.Sender) span.SetTag("reference.sent", reference.Sent) } id := reference.Id if id == "" { id = reference.ID() } // Check if the item exists. item := elastic.NewMultiGetItem(). Index(s.Index). Type("doc"). Id(id). FetchSource(elastic.NewFetchSourceContext(false)) res, err := s.Client.MultiGet().Add(item).Do(sctx) if err != nil { log.WithError(err).Error("Failed to get from ElasticSearch") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return false, err } span.SetTag("result", res.Docs[0].Found) return res.Docs[0].Found, nil } // Find returns alerts matching the search criteria. NOTE: Results are per Info block. func (s *Storage) Find(ctx context.Context, criteria *alerts.FindCriteria) (*alerts.FindResult, error) { // Start a new span. span, sctx := opentracing.StartSpanFromContext(ctx, "Storage.ElasticSearch::Find") defer span.Finish() search := s.Client.Search(s.Index).Type("doc") // Pagination if criteria.Start > 0 { log.WithField("value", criteria.Start).Debug("start") span.SetTag("start", criteria.Start) search = search.From(int(criteria.Start)) } if criteria.Count > 0 { log.WithField("value", criteria.Count).Debug("count") span.SetTag("count", criteria.Count) search = search.Size(int(criteria.Count)) } // Sort if len(criteria.Sort) == 0 { // Sort by effective, descending (if no sort provided) criteria.Sort = []string{"-effective"} } log.WithField("value", criteria.Sort).Debug("sort") span.SetTag("sort", criteria.Sort) for _, field := range criteria.Sort { asc := true if strings.HasPrefix(field, "-") { field = field[1:] asc = false } search = search.Sort(field, asc) } // Response fields if len(criteria.Fields) > 0 { log.WithField("value", criteria.Fields).Debug("Fields") search = 
search.FetchSourceContext(elastic.NewFetchSourceContext(true).Include(criteria.Fields...)) } // Generate the query query := elastic.NewBoolQuery() // Alert filters alertQuery := elastic.NewBoolQuery() // We don't want referenced alerts. alertQuery = alertQuery.Must(elastic.NewExistsQuery("status")) if criteria.Superseded { log.WithField("value", true).Debug("superseded") span.SetTag("superseded", true) alertQuery = alertQuery.Must(elastic.NewTermQuery("superseded", true)) } if criteria.NotSuperseded { log.WithField("value", false).Debug("superseded") span.SetTag("superseded", false) alertQuery = alertQuery.MustNot(elastic.NewTermQuery("superseded", true)) } if criteria.Status != cap.Alert_STATUS_UNKNOWN { log.WithField("value", criteria.Status).Debug("status") span.SetTag("status", criteria.Status) alertQuery = alertQuery.Must(elastic.NewTermQuery("status", criteria.Status.String())) } if criteria.MessageType != cap.Alert_MESSAGE_TYPE_UNKNOWN { log.WithField("value", criteria.MessageType).Debug("messageType") span.SetTag("messageType", criteria.MessageType) alertQuery = alertQuery.Must(elastic.NewTermQuery("messageType", criteria.MessageType.String())) } if criteria.Scope != cap.Alert_SCOPE_UNKNOWN { log.WithField("value", criteria.Scope).Debug("scope") span.SetTag("scope", criteria.Scope) alertQuery = alertQuery.Must(elastic.NewTermQuery("scope", criteria.Scope.String())) } alertInnerHit := elastic.NewInnerHit().FetchSource(true) if len(criteria.Fields) > 0 { alertInnerHit = elastic.NewInnerHit().FetchSourceContext(elastic.NewFetchSourceContext(true).Include(criteria.Fields...)) } query = query.Must(elastic.NewHasParentQuery("alert", alertQuery).InnerHit(alertInnerHit)) // Info filters if criteria.Language != "" { log.WithField("value", criteria.Language).Debug("language") span.SetTag("language", criteria.Language) if strings.ContainsAny(criteria.Language, "*?") { query = query.Must(elastic.NewWildcardQuery("language", criteria.Language)) } else { query = 
query.Must(elastic.NewTermQuery("language", criteria.Language)) } } if criteria.Certainty != cap.Info_CERTAINTY_UNKNOWN { log.WithField("value", criteria.Certainty).Debug("certainty") span.SetTag("certainty", criteria.Certainty) query = query.Must(elastic.NewTermQuery("certainty", criteria.Certainty.String())) } if criteria.Severity != cap.Info_SEVERITY_UNKNOWN { log.WithField("value", criteria.Severity).Debug("severity") span.SetTag("severity", criteria.Severity) query = query.Must(elastic.NewTermQuery("severity", criteria.Severity.String())) } if criteria.Urgency != cap.Info_URGENCY_UNKNOWN { log.WithField("value", criteria.Urgency).Debug("urgency") span.SetTag("urgency", criteria.Urgency) query = query.Must(elastic.NewTermQuery("urgency", criteria.Urgency.String())) } if criteria.Effective != nil { log.WithField("value", criteria.Effective).Debug("effective") rangeQuery, err := generateRangeQuery(sctx, "effective", criteria.Effective) if err != nil { log.WithError(err).Error("Failed to generate Effective range query") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return nil, err } query = query.Must(rangeQuery) } if criteria.Expires != nil { log.WithField("value", criteria.Expires).Debug("expires") rangeQuery, err := generateRangeQuery(sctx, "expires", criteria.Expires) if err != nil { log.WithError(err).Error("Failed to generate Expires range query") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return nil, err } query = query.Must(rangeQuery) } if criteria.Onset != nil { log.WithField("value", criteria.Expires).Debug("onset") rangeQuery, err := generateRangeQuery(sctx, "onset", criteria.Onset) if err != nil { log.WithError(err).Error("Failed to generate Onset range query") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return nil, err } query = query.Must(rangeQuery) } if criteria.Headline != "" { log.WithField("value", 
criteria.Headline).Debug("headline") span.SetTag("headline", criteria.Headline) query = query.Must(elastic.NewQueryStringQuery(criteria.Headline).Field("headline")) } if criteria.Description != "" { log.WithField("value", criteria.Description).Debug("description") span.SetTag("description", criteria.Description) query = query.Must(elastic.NewQueryStringQuery(criteria.Description).Field("description")) } if criteria.Instruction != "" { log.WithField("value", criteria.Instruction).Debug("instruction") span.SetTag("instruction", criteria.Description) query = query.Must(elastic.NewQueryStringQuery(criteria.Instruction).Field("instruction")) } // Info.Area areaQuery := elastic.NewBoolQuery() if criteria.AreaDescription != "" { log.WithField("value", criteria.AreaDescription).Debug("areas.description") span.SetTag("areas.description", criteria.AreaDescription) areaQuery = areaQuery.Must(elastic.NewQueryStringQuery(criteria.AreaDescription).Field("areas.description")) } if criteria.Point != nil { log.WithField("value", criteria.Point).Debug("point") areaQuery = areaQuery.Must(elastic.NewBoolQuery(). Should(NewGeoShapeQuery("areas.polygons").SetPoint(criteria.Point.Lat, criteria.Point.Lon)). 
Should(NewGeoShapeQuery("areas.circles").SetPoint(criteria.Point.Lat, criteria.Point.Lon))) } query = query.Must(elastic.NewNestedQuery("areas", areaQuery).InnerHit(elastic.NewInnerHit().FetchSource(false))) // Do the search search = search.Query(query) results, err := search.Do(sctx) if err != nil { log.WithError(err).Error("Failed to execute search") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return nil, err } response := alerts.FindResult{ Total: results.TotalHits(), Hits: make([]*alerts.Hit, 0), } unmarshaller := &jsonpb.Unmarshaler{ AllowUnknownFields: true, } for _, hit := range results.Hits.Hits { var alert *cap.Alert var info *cap.Info if innerHit, ok := hit.InnerHits["alert"]; ok { alert = new(cap.Alert) if err := unmarshaller.Unmarshal(bytes.NewReader(*innerHit.Hits.Hits[0].Source), alert); err != nil { log.WithError(err).Error("Failed to unmarshal alert") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return nil, err } } if hit.Source != nil { info = new(cap.Info) if err := unmarshaller.Unmarshal(bytes.NewReader(*hit.Source), info); err != nil { log.WithError(err).Error("Failed to unmarshal info") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return nil, err } } response.Hits = append(response.Hits, &alerts.Hit{ Id: hit.Id, Alert: alert, Info: info, }) } return &response, nil } // Supersede marks the reference as superseded. func (s *Storage) Supersede(ctx context.Context, reference *cap.Reference) error { // Start a new span. span, sctx := opentracing.StartSpanFromContext(ctx, "Storage.ElasticSearch::Supersede") defer span.Finish() // Save information about the request. 
if reference.Id != "" { span.SetTag("reference.id", reference.Id) } else { span.SetTag("reference.identifier", reference.Identifier) span.SetTag("reference.sender", reference.Sender) span.SetTag("reference.sent", reference.Sent) } id := reference.Id if id == "" { id = reference.ID() } log.WithField("id", id).Debug("Superseding alert") update := map[string]interface{}{ "superseded": true, } _, err := s.Client.Update().Index(s.Index).Type("doc").Id(id).Doc(update).Do(sctx) if err != nil { log.WithError(err).Error("ElasticSearch update failed") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return err } return nil } // IsSuperseded returns whether or not the referenced alert has been superseded. func (s *Storage) IsSuperseded(ctx context.Context, reference *cap.Reference) (bool, error) { // Start a new span. span, sctx := opentracing.StartSpanFromContext(ctx, "Storage.ElasticSearch::IsSuperseded") defer span.Finish() // Save information about the request. if reference.Id != "" { span.SetTag("reference.id", reference.Id) } else { span.SetTag("reference.identifier", reference.Identifier) span.SetTag("reference.sender", reference.Sender) span.SetTag("reference.sent", reference.Sent) } id := reference.Id if id == "" { id = reference.ID() } log.WithField("id", id).Debug("Checking if alert has been superseded") search := s.Client.Search().Index(s.Index).Type("doc") search = search.Size(1) search = search.Query(elastic.NewNestedQuery("references", elastic.NewTermQuery("references.id", id))) search = search.FetchSource(false) res, err := search.Do(sctx) log.Debug("Got response") if err != nil { log.WithError(err).Error("ElasticSearch query failed") raven.CaptureError(err, nil) ext.Error.Set(span, true) span.LogFields(otlog.Error(err)) return false, err } span.SetTag("result", res.TotalHits() > 0) return res.TotalHits() > 0, nil }
{ span := opentracing.SpanFromContext(ctx) rq := elastic.NewRangeQuery(field) if ts.Gte != nil { t, err := ptypes.Timestamp(ts.Gte) if err != nil { return nil, err } span.SetTag(fmt.Sprintf("query.%s.gte", field), t) rq.Gte(t) } if ts.Gt != nil { t, err := ptypes.Timestamp(ts.Gt) if err != nil { return nil, err } span.SetTag(fmt.Sprintf("query.%s.te", field), t) rq.Gt(t) } if ts.Lte != nil { t, err := ptypes.Timestamp(ts.Lte) if err != nil { return nil, err } span.SetTag(fmt.Sprintf("query.%s.lte", field), t) rq.Lte(t) } if ts.Lt != nil { t, err := ptypes.Timestamp(ts.Lt) if err != nil { return nil, err } span.SetTag(fmt.Sprintf("query.%s.lt", field), t) rq.Lt(t) } return rq, nil }
main.go
// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by cloud.google.com/go/internal/gapicgen/gensnippets. DO NOT EDIT. // [START dataflow_v1beta3_generated_JobsV1Beta3_AggregatedListJobs_sync] package main import ( "context" dataflow "cloud.google.com/go/dataflow/apiv1beta3" "google.golang.org/api/iterator" dataflowpb "google.golang.org/genproto/googleapis/dataflow/v1beta3" ) func
() { ctx := context.Background() c, err := dataflow.NewJobsV1Beta3Client(ctx) if err != nil { // TODO: Handle error. } defer c.Close() req := &dataflowpb.ListJobsRequest{ // TODO: Fill request struct fields. // See https://pkg.go.dev/google.golang.org/genproto/googleapis/dataflow/v1beta3#ListJobsRequest. } it := c.AggregatedListJobs(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } // [END dataflow_v1beta3_generated_JobsV1Beta3_AggregatedListJobs_sync]
main
store1.js
export const store1 = 'delayed_action_STORE_1' export function changeStore1(payload) { return (dispatch) => { console.log('changeStore1'); setTimeout(() => { dispatch({ type: store1, payload: { value: payload } });
const initialState = { value: '' }; export default function reducer1(state = initialState, action) { if (action.type === store1) { console.log('reducer1 triggered', action.payload); return action.payload; } return state; }
}, 1000) } }
test_io_tools.py
# Copyright (c) 2016 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """Test the `io.tools` module.""" import numpy as np import pytest from metpy.io._tools import hexdump, UnitLinker from metpy.io.cdm import Dataset from metpy.testing import assert_array_equal, ignore_deprecation from metpy.units import units @pytest.fixture() @ignore_deprecation def test_var(): """Fixture to create a dataset and variable for tests.""" ds = Dataset() ds.createDimension('x', 5) var = ds.createVariable('data', 'f4', ('x',), 5) var[:] = np.arange(5) return var def test_unit_linker(test_var): """Test that UnitLinker successfully adds units.""" test_var.units = 'meters' new_var = UnitLinker(test_var) assert_array_equal(new_var[:], np.arange(5) * units.m) def test_unit_linker_get_units(test_var): """Test that we can get the units from UnitLinker.""" test_var.units = 'knots' new_var = UnitLinker(test_var) assert new_var.units == units('knots') def test_unit_linker_missing(test_var): """Test that UnitLinker works with missing units.""" new_var = UnitLinker(test_var) assert_array_equal(new_var[:], np.arange(5)) def test_unit_linker_bad(test_var): """Test that UnitLinker ignores bad unit strings.""" test_var.units = 'badunit' new_var = UnitLinker(test_var) assert_array_equal(new_var[:], np.arange(5)) def test_unit_override(test_var): """Test that we can override a variable's bad unit string.""" test_var.units = 'C' new_var = UnitLinker(test_var) new_var.units = 'degC' assert_array_equal(new_var[:], np.arange(5) * units.degC) def test_unit_override_obj(test_var): """Test that we can override with an object.""" test_var.units = 'C' new_var = UnitLinker(test_var) new_var.units = units.degC assert_array_equal(new_var[:], np.arange(5) * units.degC) def
(test_var): """Test that we are properly able to access attributes from the variable.""" test_var.att = 'abc' new_var = UnitLinker(test_var) assert new_var.att == test_var.att def test_hexdump(): """Test hexdump tool.""" data = bytearray([77, 101, 116, 80, 121]) assert hexdump(data, 4, width=8) == '4D657450 79------ 0 0 MetPy'
test_attribute_forwarding
sasportal-gen.go
// Copyright 2021 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Code generated file. DO NOT EDIT. // Package sasportal provides access to the SAS Portal API. // // For product documentation, see: https://developers.google.com/spectrum-access-system/ // // Creating a client // // Usage example: // // import "google.golang.org/api/sasportal/v1alpha1" // ... // ctx := context.Background() // sasportalService, err := sasportal.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // Other authentication options // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // // sasportalService, err := sasportal.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // sasportalService, err := sasportal.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package sasportal // import "google.golang.org/api/sasportal/v1alpha1" import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "strconv" "strings" googleapi "google.golang.org/api/googleapi" gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" internaloption "google.golang.org/api/option/internaloption" htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code // below doesn't. 
var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = gensupport.MarshalJSON var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint const apiId = "sasportal:v1alpha1" const apiName = "sasportal" const apiVersion = "v1alpha1" const basePath = "https://sasportal.googleapis.com/" const mtlsBasePath = "https://sasportal.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( // See your primary Google Account email address UserinfoEmailScope = "https://www.googleapis.com/auth/userinfo.email" ) // NewService creates a new Service. func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { scopesOption := option.WithScopes( "https://www.googleapis.com/auth/userinfo.email", ) // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err } s, err := New(client) if err != nil { return nil, err } if endpoint != "" { s.BasePath = endpoint } return s, nil } // New creates a new Service. It uses the provided http.Client for requests. // // Deprecated: please use NewService instead. // To provide a custom HTTP client, use option.WithHTTPClient. // If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.Customers = NewCustomersService(s) s.Deployments = NewDeploymentsService(s) s.Installer = NewInstallerService(s) s.Nodes = NewNodesService(s) s.Policies = NewPoliciesService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment Customers *CustomersService Deployments *DeploymentsService Installer *InstallerService Nodes *NodesService Policies *PoliciesService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewCustomersService(s *Service) *CustomersService { rs := &CustomersService{s: s} rs.Deployments = NewCustomersDeploymentsService(s) rs.Devices = NewCustomersDevicesService(s) rs.Nodes = NewCustomersNodesService(s) return rs } type CustomersService struct { s *Service Deployments *CustomersDeploymentsService Devices *CustomersDevicesService Nodes *CustomersNodesService } func NewCustomersDeploymentsService(s *Service) *CustomersDeploymentsService { rs := &CustomersDeploymentsService{s: s} rs.Devices = NewCustomersDeploymentsDevicesService(s) return rs } type CustomersDeploymentsService struct { s *Service Devices *CustomersDeploymentsDevicesService } func NewCustomersDeploymentsDevicesService(s *Service) *CustomersDeploymentsDevicesService { rs := &CustomersDeploymentsDevicesService{s: s} return rs } type CustomersDeploymentsDevicesService struct { s *Service } func NewCustomersDevicesService(s *Service) *CustomersDevicesService { rs := &CustomersDevicesService{s: s} return rs } type CustomersDevicesService struct { s *Service } func NewCustomersNodesService(s *Service) *CustomersNodesService { rs := &CustomersNodesService{s: s} rs.Deployments = NewCustomersNodesDeploymentsService(s) rs.Devices = 
NewCustomersNodesDevicesService(s)
	rs.Nodes = NewCustomersNodesNodesService(s)
	return rs
}

type CustomersNodesService struct {
	s *Service

	Deployments *CustomersNodesDeploymentsService

	Devices *CustomersNodesDevicesService

	Nodes *CustomersNodesNodesService
}

func NewCustomersNodesDeploymentsService(s *Service) *CustomersNodesDeploymentsService {
	return &CustomersNodesDeploymentsService{s: s}
}

type CustomersNodesDeploymentsService struct {
	s *Service
}

func NewCustomersNodesDevicesService(s *Service) *CustomersNodesDevicesService {
	return &CustomersNodesDevicesService{s: s}
}

type CustomersNodesDevicesService struct {
	s *Service
}

func NewCustomersNodesNodesService(s *Service) *CustomersNodesNodesService {
	return &CustomersNodesNodesService{s: s}
}

type CustomersNodesNodesService struct {
	s *Service
}

func NewDeploymentsService(s *Service) *DeploymentsService {
	rs := &DeploymentsService{s: s}
	rs.Devices = NewDeploymentsDevicesService(s)
	return rs
}

type DeploymentsService struct {
	s *Service

	Devices *DeploymentsDevicesService
}

func NewDeploymentsDevicesService(s *Service) *DeploymentsDevicesService {
	return &DeploymentsDevicesService{s: s}
}

type DeploymentsDevicesService struct {
	s *Service
}

func NewInstallerService(s *Service) *InstallerService {
	return &InstallerService{s: s}
}

type InstallerService struct {
	s *Service
}

func NewNodesService(s *Service) *NodesService {
	rs := &NodesService{s: s}
	rs.Deployments = NewNodesDeploymentsService(s)
	rs.Devices = NewNodesDevicesService(s)
	rs.Nodes = NewNodesNodesService(s)
	return rs
}

type NodesService struct {
	s *Service

	Deployments *NodesDeploymentsService

	Devices *NodesDevicesService

	Nodes *NodesNodesService
}

// NewNodesDeploymentsService constructs the deployments sub-service for
// top-level nodes. The function identifier was missing in the source
// (the declaration read `func (s *Service) *NodesDeploymentsService`,
// which is not valid Go); the name is restored to match its call site
// in NewNodesService above.
func NewNodesDeploymentsService(s *Service) *NodesDeploymentsService {
	rs := &NodesDeploymentsService{s: s}
	rs.Devices = NewNodesDeploymentsDevicesService(s)
	return rs
}

type NodesDeploymentsService struct {
	s *Service

	Devices *NodesDeploymentsDevicesService
}

func NewNodesDeploymentsDevicesService(s *Service) *NodesDeploymentsDevicesService {
	return &NodesDeploymentsDevicesService{s: s}
}

type NodesDeploymentsDevicesService struct {
	s *Service
}

func NewNodesDevicesService(s *Service) *NodesDevicesService {
	return &NodesDevicesService{s: s}
}

type NodesDevicesService struct {
	s *Service
}

func NewNodesNodesService(s *Service) *NodesNodesService {
	rs := &NodesNodesService{s: s}
	rs.Deployments = NewNodesNodesDeploymentsService(s)
	rs.Devices = NewNodesNodesDevicesService(s)
	rs.Nodes = NewNodesNodesNodesService(s)
	return rs
}

type NodesNodesService struct {
	s *Service

	Deployments *NodesNodesDeploymentsService

	Devices *NodesNodesDevicesService

	Nodes *NodesNodesNodesService
}

func NewNodesNodesDeploymentsService(s *Service) *NodesNodesDeploymentsService {
	return &NodesNodesDeploymentsService{s: s}
}

type NodesNodesDeploymentsService struct {
	s *Service
}

func NewNodesNodesDevicesService(s *Service) *NodesNodesDevicesService {
	return &NodesNodesDevicesService{s: s}
}

type NodesNodesDevicesService struct {
	s *Service
}

func NewNodesNodesNodesService(s *Service) *NodesNodesNodesService {
	return &NodesNodesNodesService{s: s}
}

type NodesNodesNodesService struct {
	s *Service
}

func NewPoliciesService(s *Service) *PoliciesService {
	return &PoliciesService{s: s}
}

type PoliciesService struct {
	s *Service
}

// SasPortalAssignment: Associates `members` with a `role`.
type SasPortalAssignment struct {
	// Members: The identities the role is assigned to. It can have the
	// following values: * `{user_email}`: An email address that represents
	// a specific Google account. For example: `[email protected]`.
* // `{group_email}`: An email address that represents a Google group. For // example, `[email protected]`. Members []string `json:"members,omitempty"` // Role: Required. Role that is assigned to `members`. Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Members") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Members") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SasPortalAssignment) MarshalJSON() ([]byte, error) { type NoMethod SasPortalAssignment raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalChannelWithScore: The channel with score. type SasPortalChannelWithScore struct { // FrequencyRange: The frequency range of the channel. FrequencyRange *SasPortalFrequencyRange `json:"frequencyRange,omitempty"` // Score: The channel score, normalized to be in [0,100]. Score float64 `json:"score,omitempty"` // ForceSendFields is a list of field names (e.g. "FrequencyRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "FrequencyRange") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *SasPortalChannelWithScore) MarshalJSON() ([]byte, error) { type NoMethod SasPortalChannelWithScore raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *SasPortalChannelWithScore) UnmarshalJSON(data []byte) error { type NoMethod SasPortalChannelWithScore var s1 struct { Score gensupport.JSONFloat64 `json:"score"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Score = float64(s1.Score) return nil } // SasPortalCreateSignedDeviceRequest: Request for CreateSignedDevice. type SasPortalCreateSignedDeviceRequest struct { // EncodedDevice: Required. JSON Web Token signed using a CPI private // key. Payload must be the JSON encoding of the device. The user_id // field must be set. EncodedDevice string `json:"encodedDevice,omitempty"` // InstallerId: Required. Unique installer id (CPI ID) from the // Certified Professional Installers database. InstallerId string `json:"installerId,omitempty"` // ForceSendFields is a list of field names (e.g. "EncodedDevice") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "EncodedDevice") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SasPortalCreateSignedDeviceRequest) MarshalJSON() ([]byte, error) { type NoMethod SasPortalCreateSignedDeviceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalCustomer: Entity representing a SAS customer. type SasPortalCustomer struct { // DisplayName: Required. Name of the organization that the customer // entity represents. DisplayName string `json:"displayName,omitempty"` // Name: Output only. Resource name of the customer. Name string `json:"name,omitempty"` // SasUserIds: User IDs used by the devices belonging to this customer. SasUserIds []string `json:"sasUserIds,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. 
It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SasPortalCustomer) MarshalJSON() ([]byte, error) { type NoMethod SasPortalCustomer raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalDeployment: The Deployment. type SasPortalDeployment struct { // AllowedBillingModes: The allowed billing modes under this deployment. // // Possible values: // "BILLING_MODE_UNSPECIFIED" - Billing mode has not been specified. // "MOBILE" - Price is based on category of CBSD: Category A, Category // B registered with SAS. // "FIXED_WIRELESS" - Price is based on type of CBSD: Base station or // CPE. AllowedBillingModes []string `json:"allowedBillingModes,omitempty"` // DefaultBillingMode: Default billing mode for the deployment and // devices under it. // // Possible values: // "BILLING_MODE_UNSPECIFIED" - Billing mode has not been specified. // "MOBILE" - Price is based on category of CBSD: Category A, Category // B registered with SAS. // "FIXED_WIRELESS" - Price is based on type of CBSD: Base station or // CPE. DefaultBillingMode string `json:"defaultBillingMode,omitempty"` // DisplayName: The deployment's display name. DisplayName string `json:"displayName,omitempty"` // Name: Output only. Resource name. Name string `json:"name,omitempty"` // SasUserIds: User ID used by the devices belonging to this deployment. // Each deployment should be associated with one unique user ID. SasUserIds []string `json:"sasUserIds,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "AllowedBillingModes") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AllowedBillingModes") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *SasPortalDeployment) MarshalJSON() ([]byte, error) { type NoMethod SasPortalDeployment raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type SasPortalDevice struct { // ActiveConfig: Output only. Current configuration of the device as // registered to the SAS. ActiveConfig *SasPortalDeviceConfig `json:"activeConfig,omitempty"` // CurrentChannels: Output only. Current channels with scores. CurrentChannels []*SasPortalChannelWithScore `json:"currentChannels,omitempty"` // DeviceMetadata: Device parameters that can be overridden by both SAS // Portal and SAS registration requests. DeviceMetadata *SasPortalDeviceMetadata `json:"deviceMetadata,omitempty"` // DisplayName: Device display name. DisplayName string `json:"displayName,omitempty"` // FccId: The FCC identifier of the device. FccId string `json:"fccId,omitempty"` // GrantRangeAllowlists: Only ranges within the allowlists are available // for new grants. GrantRangeAllowlists []*SasPortalFrequencyRange `json:"grantRangeAllowlists,omitempty"` // Grants: Output only. Grants held by the device. Grants []*SasPortalDeviceGrant `json:"grants,omitempty"` // Name: Output only. The resource path name. 
Name string `json:"name,omitempty"` // PreloadedConfig: Configuration of the device, as specified via SAS // Portal API. PreloadedConfig *SasPortalDeviceConfig `json:"preloadedConfig,omitempty"` // SerialNumber: A serial number assigned to the device by the device // manufacturer. SerialNumber string `json:"serialNumber,omitempty"` // State: Output only. Device state. // // Possible values: // "DEVICE_STATE_UNSPECIFIED" - Unspecified state. // "RESERVED" - Device created in the SAS Portal, however, not yet // registered with SAS. // "REGISTERED" - Device registered with SAS. // "DEREGISTERED" - Device de-registered with SAS. State string `json:"state,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "ActiveConfig") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ActiveConfig") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SasPortalDevice) MarshalJSON() ([]byte, error) { type NoMethod SasPortalDevice raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalDeviceAirInterface: Information about the device's air // interface. 
type SasPortalDeviceAirInterface struct { // RadioTechnology: Conditional. This field specifies the radio access // technology that is used for the CBSD. // // Possible values: // "RADIO_TECHNOLOGY_UNSPECIFIED" // "E_UTRA" // "CAMBIUM_NETWORKS" // "FOUR_G_BBW_SAA_1" // "NR" // "DOODLE_CBRS" // "CW" // "REDLINE" // "TARANA_WIRELESS" RadioTechnology string `json:"radioTechnology,omitempty"` // SupportedSpec: Optional. This field is related to the // `radioTechnology` and provides the air interface specification that // the CBSD is compliant with at the time of registration. SupportedSpec string `json:"supportedSpec,omitempty"` // ForceSendFields is a list of field names (e.g. "RadioTechnology") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "RadioTechnology") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *SasPortalDeviceAirInterface) MarshalJSON() ([]byte, error) { type NoMethod SasPortalDeviceAirInterface raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalDeviceConfig: Information about the device configuration. type SasPortalDeviceConfig struct { // AirInterface: Information about this device's air interface. 
AirInterface *SasPortalDeviceAirInterface `json:"airInterface,omitempty"` // CallSign: The call sign of the device operator. CallSign string `json:"callSign,omitempty"` // Category: FCC category of the device. // // Possible values: // "DEVICE_CATEGORY_UNSPECIFIED" - Unspecified device category. // "DEVICE_CATEGORY_A" - Category A. // "DEVICE_CATEGORY_B" - Category B. Category string `json:"category,omitempty"` // InstallationParams: Installation parameters for the device. InstallationParams *SasPortalInstallationParams `json:"installationParams,omitempty"` // IsSigned: Output only. Whether the configuration has been signed by a // CPI. IsSigned bool `json:"isSigned,omitempty"` // MeasurementCapabilities: Measurement reporting capabilities of the // device. // // Possible values: // "MEASUREMENT_CAPABILITY_UNSPECIFIED" // "MEASUREMENT_CAPABILITY_RECEIVED_POWER_WITH_GRANT" // "MEASUREMENT_CAPABILITY_RECEIVED_POWER_WITHOUT_GRANT" MeasurementCapabilities []string `json:"measurementCapabilities,omitempty"` // Model: Information about this device model. Model *SasPortalDeviceModel `json:"model,omitempty"` // State: State of the configuration. // // Possible values: // "DEVICE_CONFIG_STATE_UNSPECIFIED" // "DRAFT" // "FINAL" State string `json:"state,omitempty"` // UpdateTime: Output only. The last time the device configuration was // edited. UpdateTime string `json:"updateTime,omitempty"` // UserId: The identifier of a device user. UserId string `json:"userId,omitempty"` // ForceSendFields is a list of field names (e.g. "AirInterface") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. 
"AirInterface") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SasPortalDeviceConfig) MarshalJSON() ([]byte, error) { type NoMethod SasPortalDeviceConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalDeviceGrant: Device grant. It is an authorization provided // by the Spectrum Access System to a device to transmit using specified // operating parameters after a successful heartbeat by the device. type SasPortalDeviceGrant struct { // ChannelType: Type of channel used. // // Possible values: // "CHANNEL_TYPE_UNSPECIFIED" // "CHANNEL_TYPE_GAA" // "CHANNEL_TYPE_PAL" ChannelType string `json:"channelType,omitempty"` // ExpireTime: The expiration time of the grant. ExpireTime string `json:"expireTime,omitempty"` // FrequencyRange: The transmission frequency range. FrequencyRange *SasPortalFrequencyRange `json:"frequencyRange,omitempty"` // GrantId: Grant Id. GrantId string `json:"grantId,omitempty"` // MaxEirp: Maximum Equivalent Isotropically Radiated Power (EIRP) // permitted by the grant. The maximum EIRP is in units of dBm/MHz. The // value of `maxEirp` represents the average (RMS) EIRP that would be // measured by the procedure defined in FCC part 96.41(e)(3). MaxEirp float64 `json:"maxEirp,omitempty"` // MoveList: The DPA move lists on which this grant appears. MoveList []*SasPortalDpaMoveList `json:"moveList,omitempty"` // State: State of the grant. // // Possible values: // "GRANT_STATE_UNSPECIFIED" // "GRANT_STATE_GRANTED" - The grant has been granted but the device // is not heartbeating on it. 
// "GRANT_STATE_TERMINATED" - The grant has been terminated by the // SAS. // "GRANT_STATE_SUSPENDED" - The grant has been suspended by the SAS. // "GRANT_STATE_AUTHORIZED" - The device is currently transmitting. // "GRANT_STATE_EXPIRED" - The grant has expired. State string `json:"state,omitempty"` // SuspensionReason: If the grant is suspended, the reason(s) for // suspension. SuspensionReason []string `json:"suspensionReason,omitempty"` // ForceSendFields is a list of field names (e.g. "ChannelType") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ChannelType") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SasPortalDeviceGrant) MarshalJSON() ([]byte, error) { type NoMethod SasPortalDeviceGrant raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *SasPortalDeviceGrant) UnmarshalJSON(data []byte) error { type NoMethod SasPortalDeviceGrant var s1 struct { MaxEirp gensupport.JSONFloat64 `json:"maxEirp"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.MaxEirp = float64(s1.MaxEirp) return nil } // SasPortalDeviceMetadata: Device data overridable by both SAS Portal // and registration requests. 
type SasPortalDeviceMetadata struct { } // SasPortalDeviceModel: Information about the model of the device. type SasPortalDeviceModel struct { // FirmwareVersion: The firmware version of the device. FirmwareVersion string `json:"firmwareVersion,omitempty"` // HardwareVersion: The hardware version of the device. HardwareVersion string `json:"hardwareVersion,omitempty"` // Name: The name of the device model. Name string `json:"name,omitempty"` // SoftwareVersion: The software version of the device. SoftwareVersion string `json:"softwareVersion,omitempty"` // Vendor: The name of the device vendor. Vendor string `json:"vendor,omitempty"` // ForceSendFields is a list of field names (e.g. "FirmwareVersion") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "FirmwareVersion") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *SasPortalDeviceModel) MarshalJSON() ([]byte, error) { type NoMethod SasPortalDeviceModel raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalDpaMoveList: An entry in a DPA's move list. type SasPortalDpaMoveList struct { // DpaId: The ID of the DPA. DpaId string `json:"dpaId,omitempty"` // FrequencyRange: The frequency range that the move list affects. 
FrequencyRange *SasPortalFrequencyRange `json:"frequencyRange,omitempty"` // ForceSendFields is a list of field names (e.g. "DpaId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DpaId") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SasPortalDpaMoveList) MarshalJSON() ([]byte, error) { type NoMethod SasPortalDpaMoveList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalEmpty: A generic empty message that you can re-use to avoid // defining duplicated empty messages in your APIs. A typical example is // to use it as the request or the response type of an API method. For // instance: service Foo { rpc Bar(google.protobuf.Empty) returns // (google.protobuf.Empty); } The JSON representation for `Empty` is // empty JSON object `{}`. type SasPortalEmpty struct { // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` } // SasPortalFrequencyRange: Frequency range from `low_frequency` to // `high_frequency`. type SasPortalFrequencyRange struct { // HighFrequencyMhz: The highest frequency of the frequency range in // MHz. 
HighFrequencyMhz float64 `json:"highFrequencyMhz,omitempty"` // LowFrequencyMhz: The lowest frequency of the frequency range in MHz. LowFrequencyMhz float64 `json:"lowFrequencyMhz,omitempty"` // ForceSendFields is a list of field names (e.g. "HighFrequencyMhz") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "HighFrequencyMhz") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *SasPortalFrequencyRange) MarshalJSON() ([]byte, error) { type NoMethod SasPortalFrequencyRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *SasPortalFrequencyRange) UnmarshalJSON(data []byte) error { type NoMethod SasPortalFrequencyRange var s1 struct { HighFrequencyMhz gensupport.JSONFloat64 `json:"highFrequencyMhz"` LowFrequencyMhz gensupport.JSONFloat64 `json:"lowFrequencyMhz"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.HighFrequencyMhz = float64(s1.HighFrequencyMhz) s.LowFrequencyMhz = float64(s1.LowFrequencyMhz) return nil } // SasPortalGenerateSecretRequest: Request for GenerateSecret. type SasPortalGenerateSecretRequest struct { } // SasPortalGenerateSecretResponse: Response for GenerateSecret. 
type SasPortalGenerateSecretResponse struct {
	// Secret: The secret generated by the string and used by
	// ValidateInstaller.
	Secret string `json:"secret,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Secret") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Secret") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalGenerateSecretResponse) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalGenerateSecretResponse
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalGetPolicyRequest: Request message for `GetPolicy` method.
type SasPortalGetPolicyRequest struct {
	// Resource: Required. The resource for which the policy is being
	// requested.
	Resource string `json:"resource,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Resource") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Resource") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalGetPolicyRequest) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalGetPolicyRequest
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalInstallationParams: Information about the device
// installation parameters.
type SasPortalInstallationParams struct {
	// AntennaAzimuth: Boresight direction of the horizontal plane of the
	// antenna in degrees with respect to true north. The value of this
	// parameter is an integer with a value between 0 and 359 inclusive. A
	// value of 0 degrees means true north; a value of 90 degrees means
	// east. This parameter is optional for Category A devices and
	// conditional for Category B devices.
	AntennaAzimuth int64 `json:"antennaAzimuth,omitempty"`

	// AntennaBeamwidth: 3-dB antenna beamwidth of the antenna in the
	// horizontal-plane in degrees. This parameter is an unsigned integer
	// having a value between 0 and 360 (degrees) inclusive; it is optional
	// for Category A devices and conditional for Category B devices.
	AntennaBeamwidth int64 `json:"antennaBeamwidth,omitempty"`

	// AntennaDowntilt: Antenna downtilt in degrees and is an integer with a
	// value between -90 and +90 inclusive; a negative value means the
	// antenna is tilted up (above horizontal). This parameter is optional
	// for Category A devices and conditional for Category B devices.
	AntennaDowntilt int64 `json:"antennaDowntilt,omitempty"`

	// AntennaGain: Peak antenna gain in dBi. This parameter is an integer
	// with a value between -127 and +128 (dBi) inclusive.
	AntennaGain int64 `json:"antennaGain,omitempty"`

	// AntennaModel: If an external antenna is used, the antenna model is
	// optionally provided in this field. The string has a maximum length of
	// 128 octets.
	AntennaModel string `json:"antennaModel,omitempty"`

	// CpeCbsdIndication: If present, this parameter specifies whether the
	// CBSD is a CPE-CBSD or not.
	CpeCbsdIndication bool `json:"cpeCbsdIndication,omitempty"`

	// EirpCapability: This parameter is the maximum device EIRP in units of
	// dBm/10MHz and is an integer with a value between -127 and +47 (dBm/10
	// MHz) inclusive. If not included, SAS interprets it as maximum
	// allowable EIRP in units of dBm/10MHz for device category.
	EirpCapability int64 `json:"eirpCapability,omitempty"`

	// Height: Device antenna height in meters. When the `heightType`
	// parameter value is "AGL", the antenna height should be given relative
	// to ground level. When the `heightType` parameter value is "AMSL", it
	// is given with respect to WGS84 datum.
	Height float64 `json:"height,omitempty"`

	// HeightType: Specifies how the height is measured.
	//
	// Possible values:
	//   "HEIGHT_TYPE_UNSPECIFIED" - Unspecified height type.
	//   "HEIGHT_TYPE_AGL" - AGL height is measured relative to the ground
	// level.
	//   "HEIGHT_TYPE_AMSL" - AMSL height is measured relative to the mean
	// sea level.
	HeightType string `json:"heightType,omitempty"`

	// HorizontalAccuracy: A positive number in meters to indicate accuracy
	// of the device antenna horizontal location. This optional parameter
	// should only be present if its value is less than the FCC requirement
	// of 50 meters.
	HorizontalAccuracy float64 `json:"horizontalAccuracy,omitempty"`

	// IndoorDeployment: Whether the device antenna is indoor or not.
	// `true`: indoor. `false`: outdoor.
	IndoorDeployment bool `json:"indoorDeployment,omitempty"`

	// Latitude: Latitude of the device antenna location in degrees relative
	// to the WGS 84 datum. The allowed range is from -90.000000 to
	// +90.000000. Positive values represent latitudes north of the equator;
	// negative values south of the equator.
	Latitude float64 `json:"latitude,omitempty"`

	// Longitude: Longitude of the device antenna location in degrees
	// relative to the WGS 84 datum. The allowed range is from -180.000000
	// to +180.000000. Positive values represent longitudes east of the
	// prime meridian; negative values west of the prime meridian.
	Longitude float64 `json:"longitude,omitempty"`

	// VerticalAccuracy: A positive number in meters to indicate accuracy of
	// the device antenna vertical location. This optional parameter should
	// only be present if its value is less than the FCC requirement of 3
	// meters.
	VerticalAccuracy float64 `json:"verticalAccuracy,omitempty"`

	// ForceSendFields is a list of field names (e.g. "AntennaAzimuth") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "AntennaAzimuth") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalInstallationParams) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalInstallationParams
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// UnmarshalJSON implements the json.Unmarshaler interface; the float64
// fields are decoded through gensupport.JSONFloat64.
func (s *SasPortalInstallationParams) UnmarshalJSON(data []byte) error {
	type NoMethod SasPortalInstallationParams
	var s1 struct {
		Height             gensupport.JSONFloat64 `json:"height"`
		HorizontalAccuracy gensupport.JSONFloat64 `json:"horizontalAccuracy"`
		Latitude           gensupport.JSONFloat64 `json:"latitude"`
		Longitude          gensupport.JSONFloat64 `json:"longitude"`
		VerticalAccuracy   gensupport.JSONFloat64 `json:"verticalAccuracy"`
		*NoMethod
	}
	s1.NoMethod = (*NoMethod)(s)
	if err := json.Unmarshal(data, &s1); err != nil {
		return err
	}
	s.Height = float64(s1.Height)
	s.HorizontalAccuracy = float64(s1.HorizontalAccuracy)
	s.Latitude = float64(s1.Latitude)
	s.Longitude = float64(s1.Longitude)
	s.VerticalAccuracy = float64(s1.VerticalAccuracy)
	return nil
}

// SasPortalListCustomersResponse: Response for `ListCustomers`.
type SasPortalListCustomersResponse struct {
	// Customers: The list of customers that match the request.
	Customers []*SasPortalCustomer `json:"customers,omitempty"`

	// NextPageToken: A pagination token returned from a previous call to
	// ListCustomers that indicates from where listing should continue. If
	// the field is missing or empty, it means there are no more customers.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Customers") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Customers") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalListCustomersResponse) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalListCustomersResponse
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalListDeploymentsResponse: Response for ListDeployments.
type SasPortalListDeploymentsResponse struct {
	// Deployments: The deployments that match the request.
	Deployments []*SasPortalDeployment `json:"deployments,omitempty"`

	// NextPageToken: A pagination token returned from a previous call to
	// ListDeployments that indicates from where listing should continue. If
	// the field is missing or empty, it means there are no more
	// deployments.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Deployments") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Deployments") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests.
	// However, any field with an empty value appearing in NullFields will
	// be sent to the server as null. It is an error if a field in this
	// list has a non-empty value. This may be used to include null fields
	// in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalListDeploymentsResponse) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalListDeploymentsResponse
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalListDevicesResponse: Response for ListDevices.
type SasPortalListDevicesResponse struct {
	// Devices: The devices that match the request.
	Devices []*SasPortalDevice `json:"devices,omitempty"`

	// NextPageToken: A pagination token returned from a previous call to
	// ListDevices that indicates from where listing should continue. If the
	// field is missing or empty, it means there are no more devices.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Devices") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Devices") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalListDevicesResponse) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalListDevicesResponse
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalListNodesResponse: Response for ListNodes.
type SasPortalListNodesResponse struct {
	// NextPageToken: A pagination token returned from a previous call to
	// ListNodes that indicates from where listing should continue. If the
	// field is missing or empty, it means there are no more nodes.
	NextPageToken string `json:"nextPageToken,omitempty"`

	// Nodes: The nodes that match the request.
	Nodes []*SasPortalNode `json:"nodes,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "NextPageToken") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "NextPageToken") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalListNodesResponse) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalListNodesResponse
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalMoveDeploymentRequest: Request for MoveDeployment.
type SasPortalMoveDeploymentRequest struct {
	// Destination: Required. The name of the new parent resource node or
	// customer to reparent the deployment under.
	Destination string `json:"destination,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Destination") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Destination") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalMoveDeploymentRequest) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalMoveDeploymentRequest
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalMoveDeviceRequest: Request for MoveDevice.
type SasPortalMoveDeviceRequest struct {
	// Destination: Required. The name of the new parent resource node or
	// customer to reparent the device under.
	Destination string `json:"destination,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Destination") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Destination") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalMoveDeviceRequest) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalMoveDeviceRequest
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalMoveNodeRequest: Request for MoveNode.
type SasPortalMoveNodeRequest struct {
	// Destination: Required. The name of the new parent resource node or
	// customer to reparent the node under.
	Destination string `json:"destination,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Destination") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Destination") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalMoveNodeRequest) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalMoveNodeRequest
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalNode: The Node.
type SasPortalNode struct {
	// DisplayName: The node's display name.
	DisplayName string `json:"displayName,omitempty"`

	// Name: Output only. Resource name.
	Name string `json:"name,omitempty"`

	// SasUserIds: User ids used by the devices belonging to this node.
	SasUserIds []string `json:"sasUserIds,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "DisplayName") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "DisplayName") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalNode) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalNode
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalOperation: This resource represents a long-running operation
// that is the result of a network API call.
type SasPortalOperation struct {
	// Done: If the value is `false`, it means the operation is still in
	// progress.
	// If `true`, the operation is completed, and either `error`
	// or `response` is available.
	Done bool `json:"done,omitempty"`

	// Error: The error result of the operation in case of failure or
	// cancellation.
	Error *SasPortalStatus `json:"error,omitempty"`

	// Metadata: Service-specific metadata associated with the operation. It
	// typically contains progress information and common metadata such as
	// create time. Some services might not provide such metadata. Any
	// method that returns a long-running operation should document the
	// metadata type, if any.
	Metadata googleapi.RawMessage `json:"metadata,omitempty"`

	// Name: The server-assigned name, which is only unique within the same
	// service that originally returns it. If you use the default HTTP
	// mapping, the `name` should be a resource name ending with
	// `operations/{unique_id}`.
	Name string `json:"name,omitempty"`

	// Response: The normal response of the operation in case of success. If
	// the original method returns no data on success, such as `Delete`, the
	// response is `google.protobuf.Empty`. If the original method is
	// standard `Get`/`Create`/`Update`, the response should be the
	// resource. For other methods, the response should have the type
	// `XxxResponse`, where `Xxx` is the original method name. For example,
	// if the original method name is `TakeSnapshot()`, the inferred
	// response type is `TakeSnapshotResponse`.
	Response googleapi.RawMessage `json:"response,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Done") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Done") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalOperation) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalOperation
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalPolicy: Defines an access control policy to the resources.
type SasPortalPolicy struct {
	// Assignments: List of assignments
	Assignments []*SasPortalAssignment `json:"assignments,omitempty"`

	// Etag: The etag is used for optimistic concurrency control as a way to
	// help prevent simultaneous updates of a policy from overwriting each
	// other. It is strongly suggested that systems make use of the etag in
	// the read-modify-write cycle to perform policy updates in order to
	// avoid race conditions: An etag is returned in the response to
	// GetPolicy, and systems are expected to put that etag in the request
	// to SetPolicy to ensure that their change will be applied to the same
	// version of the policy. If no etag is provided in the call to
	// GetPolicy, then the existing policy is overwritten blindly.
	Etag string `json:"etag,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Assignments") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Assignments") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalPolicy) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalPolicy
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalSetPolicyRequest: Request message for `SetPolicy` method.
type SasPortalSetPolicyRequest struct {
	// Policy: Required. The policy to be applied to the `resource`.
	Policy *SasPortalPolicy `json:"policy,omitempty"`

	// Resource: Required. The resource for which the policy is being
	// specified. This policy replaces any existing policy.
	Resource string `json:"resource,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Policy") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Policy") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalSetPolicyRequest) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalSetPolicyRequest
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalSignDeviceRequest: Request for SignDevice.
type SasPortalSignDeviceRequest struct {
	// Device: Required. The device to sign. The device fields name, fcc_id
	// and serial_number must be set. The user_id field must be set.
	Device *SasPortalDevice `json:"device,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Device") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Device") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalSignDeviceRequest) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalSignDeviceRequest
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalStatus: The `Status` type defines a logical error model that
// is suitable for different programming environments, including REST
// APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each
// `Status` message contains three pieces of data: error code, error
// message, and error details.
// You can find out more about this error
// model and how to work with it in the API Design Guide
// (https://cloud.google.com/apis/design/errors).
type SasPortalStatus struct {
	// Code: The status code, which should be an enum value of
	// google.rpc.Code.
	Code int64 `json:"code,omitempty"`

	// Details: A list of messages that carry the error details. There is a
	// common set of message types for APIs to use.
	Details []googleapi.RawMessage `json:"details,omitempty"`

	// Message: A developer-facing error message, which should be in
	// English. Any user-facing error message should be localized and sent
	// in the google.rpc.Status.details field, or localized by the client.
	Message string `json:"message,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Code") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Code") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalStatus) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalStatus
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalTestPermissionsRequest: Request message for
// `TestPermissions` method.
type SasPortalTestPermissionsRequest struct {
	// Permissions: The set of permissions to check for the `resource`.
	Permissions []string `json:"permissions,omitempty"`

	// Resource: Required. The resource for which the permissions are being
	// requested.
	Resource string `json:"resource,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Permissions") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not. This may be
	// used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Permissions") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements the json.Marshaler interface, honoring
// ForceSendFields and NullFields via the gensupport helper.
func (s *SasPortalTestPermissionsRequest) MarshalJSON() ([]byte, error) {
	type NoMethod SasPortalTestPermissionsRequest
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}

// SasPortalTestPermissionsResponse: Response message for
// `TestPermissions` method.
type SasPortalTestPermissionsResponse struct {
	// Permissions: A set of permissions that the caller is allowed.
	Permissions []string `json:"permissions,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Permissions") to
	// unconditionally include in API requests. By default, fields with
	// empty values are omitted from API requests. However, any non-pointer,
	// non-interface field appearing in ForceSendFields will be sent to the
	// server regardless of whether the field is empty or not.
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Permissions") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SasPortalTestPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod SasPortalTestPermissionsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalUpdateSignedDeviceRequest: Request for UpdateSignedDevice. type SasPortalUpdateSignedDeviceRequest struct { // EncodedDevice: Required. The JSON Web Token signed using a CPI // private key. Payload must be the JSON encoding of the device. The // user_id field must be set. EncodedDevice string `json:"encodedDevice,omitempty"` // InstallerId: Required. Unique installer ID (CPI ID) from the // Certified Professional Installers database. InstallerId string `json:"installerId,omitempty"` // ForceSendFields is a list of field names (e.g. "EncodedDevice") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "EncodedDevice") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. 
It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SasPortalUpdateSignedDeviceRequest) MarshalJSON() ([]byte, error) { type NoMethod SasPortalUpdateSignedDeviceRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalValidateInstallerRequest: Request for ValidateInstaller. type SasPortalValidateInstallerRequest struct { // EncodedSecret: Required. JSON Web Token signed using a CPI private // key. Payload must include a "secret" claim whose value is the secret. EncodedSecret string `json:"encodedSecret,omitempty"` // InstallerId: Required. Unique installer id (CPI ID) from the // Certified Professional Installers database. InstallerId string `json:"installerId,omitempty"` // Secret: Required. Secret returned by the GenerateSecret. Secret string `json:"secret,omitempty"` // ForceSendFields is a list of field names (e.g. "EncodedSecret") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "EncodedSecret") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } func (s *SasPortalValidateInstallerRequest) MarshalJSON() ([]byte, error) { type NoMethod SasPortalValidateInstallerRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SasPortalValidateInstallerResponse: Response for ValidateInstaller. type SasPortalValidateInstallerResponse struct { // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` } // method id "sasportal.customers.get": type CustomersGetCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Returns a requested customer. // // - name: The name of the customer. func (r *CustomersService) Get(name string) *CustomersGetCall { c := &CustomersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *CustomersGetCall) Fields(s ...googleapi.Field) *CustomersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *CustomersGetCall) IfNoneMatch(entityTag string) *CustomersGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 

func (c *CustomersGetCall) Context(ctx context.Context) *CustomersGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP GET request for this call,
// merging caller-supplied headers with the generated client headers.
func (c *CustomersGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.get" call.
// Exactly one of *SasPortalCustomer or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalCustomer.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersGetCall) Do(opts ...googleapi.CallOption) (*SasPortalCustomer, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 Not Modified is surfaced as a *googleapi.Error so callers can
	// detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalCustomer{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns a requested customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}",
	//   "httpMethod": "GET",
	//   "id": "sasportal.customers.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the customer.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalCustomer"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.list":

type CustomersListCall struct {
	s            *Service
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Returns a list of requested customers.
func (r *CustomersService) List() *CustomersListCall {
	c := &CustomersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of customers to return in the response.
func (c *CustomersListCall) PageSize(pageSize int64) *CustomersListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": A pagination token
// returned from a previous call to ListCustomers that indicates where
// this listing should continue from.
func (c *CustomersListCall) PageToken(pageToken string) *CustomersListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersListCall) Fields(s ...googleapi.Field) *CustomersListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *CustomersListCall) IfNoneMatch(entityTag string) *CustomersListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersListCall) Context(ctx context.Context) *CustomersListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP GET request for this call.
func (c *CustomersListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/customers")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.list" call.
// Exactly one of *SasPortalListCustomersResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalListCustomersResponse.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersListCall) Do(opts ...googleapi.CallOption) (*SasPortalListCustomersResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListCustomersResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns a list of requested customers.",
	//   "flatPath": "v1alpha1/customers",
	//   "httpMethod": "GET",
	//   "id": "sasportal.customers.list",
	//   "parameterOrder": [],
	//   "parameters": {
	//     "pageSize": {
	//       "description": "The maximum number of customers to return in the response.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListCustomers that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/customers",
	//   "response": {
	//     "$ref": "SasPortalListCustomersResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *CustomersListCall) Pages(ctx context.Context, f func(*SasPortalListCustomersResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.customers.patch":

type CustomersPatchCall struct {
	s                 *Service
	name              string
	sasportalcustomer *SasPortalCustomer
	urlParams_        gensupport.URLParams
	ctx_              context.Context
	header_           http.Header
}

// Patch: Updates an existing customer.
//
// - name: Output only. Resource name of the customer.
func (r *CustomersService) Patch(name string, sasportalcustomer *SasPortalCustomer) *CustomersPatchCall {
	c := &CustomersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalcustomer = sasportalcustomer
	return c
}

// UpdateMask sets the optional parameter "updateMask": Fields to be
// updated.
func (c *CustomersPatchCall) UpdateMask(updateMask string) *CustomersPatchCall {
	c.urlParams_.Set("updateMask", updateMask)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersPatchCall) Fields(s ...googleapi.Field) *CustomersPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.

func (c *CustomersPatchCall) Context(ctx context.Context) *CustomersPatchCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersPatchCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP PATCH request for this call,
// serializing the SasPortalCustomer payload as the JSON request body.
func (c *CustomersPatchCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalcustomer)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.patch" call.
// Exactly one of *SasPortalCustomer or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalCustomer.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersPatchCall) Do(opts ...googleapi.CallOption) (*SasPortalCustomer, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalCustomer{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates an existing customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}",
	//   "httpMethod": "PATCH",
	//   "id": "sasportal.customers.patch",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Output only. Resource name of the customer.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "updateMask": {
	//       "description": "Fields to be updated.",
	//       "format": "google-fieldmask",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "request": {
	//     "$ref": "SasPortalCustomer"
	//   },
	//   "response": {
	//     "$ref": "SasPortalCustomer"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.deployments.create":

type CustomersDeploymentsCreateCall struct {
	s                   *Service
	parent              string
	sasportaldeployment *SasPortalDeployment
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}

// Create: Creates a new deployment.
//
// - parent: The parent resource name where the deployment is to be
//   created.
func (r *CustomersDeploymentsService) Create(parent string, sasportaldeployment *SasPortalDeployment) *CustomersDeploymentsCreateCall {
	c := &CustomersDeploymentsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportaldeployment = sasportaldeployment
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDeploymentsCreateCall) Fields(s ...googleapi.Field) *CustomersDeploymentsCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDeploymentsCreateCall) Context(ctx context.Context) *CustomersDeploymentsCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDeploymentsCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP POST request for this call,
// serializing the SasPortalDeployment payload as the JSON request body.
func (c *CustomersDeploymentsCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldeployment)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/deployments")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.deployments.create" call.
// Exactly one of *SasPortalDeployment or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalDeployment.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDeploymentsCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalDeployment, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDeployment{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new deployment.",
	//   "flatPath": "v1alpha1/customers/{customersId}/deployments",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.deployments.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The parent resource name where the deployment is to be created.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/deployments",
	//   "request": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.deployments.delete":

type CustomersDeploymentsDeleteCall struct {
	s          *Service
	name       string
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}

// Delete: Deletes a deployment.
//
// - name: The name of the deployment.
func (r *CustomersDeploymentsService) Delete(name string) *CustomersDeploymentsDeleteCall {
	c := &CustomersDeploymentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDeploymentsDeleteCall) Fields(s ...googleapi.Field) *CustomersDeploymentsDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDeploymentsDeleteCall) Context(ctx context.Context) *CustomersDeploymentsDeleteCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.

func (c *CustomersDeploymentsDeleteCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP DELETE request for this call.
func (c *CustomersDeploymentsDeleteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("DELETE", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.deployments.delete" call.
// Exactly one of *SasPortalEmpty or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalEmpty.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDeploymentsDeleteCall) Do(opts ...googleapi.CallOption) (*SasPortalEmpty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalEmpty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Deletes a deployment.",
	//   "flatPath": "v1alpha1/customers/{customersId}/deployments/{deploymentsId}",
	//   "httpMethod": "DELETE",
	//   "id": "sasportal.customers.deployments.delete",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the deployment.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalEmpty"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.deployments.get":

type CustomersDeploymentsGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Returns a requested deployment.
//
// - name: The name of the deployment.
func (r *CustomersDeploymentsService) Get(name string) *CustomersDeploymentsGetCall {
	c := &CustomersDeploymentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDeploymentsGetCall) Fields(s ...googleapi.Field) *CustomersDeploymentsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *CustomersDeploymentsGetCall) IfNoneMatch(entityTag string) *CustomersDeploymentsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDeploymentsGetCall) Context(ctx context.Context) *CustomersDeploymentsGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDeploymentsGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest constructs and sends the HTTP request for this call,
// returning the raw response.
func (c *CustomersDeploymentsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.deployments.get" call.
// Exactly one of *SasPortalDeployment or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalDeployment.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDeploymentsGetCall) Do(opts ...googleapi.CallOption) (*SasPortalDeployment, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDeployment{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns a requested deployment.",
	//   "flatPath": "v1alpha1/customers/{customersId}/deployments/{deploymentsId}",
	//   "httpMethod": "GET",
	//   "id": "sasportal.customers.deployments.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the deployment.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.deployments.list":

type CustomersDeploymentsListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists deployments.
//
// - parent: The parent resource name, for example, "nodes/1",
//   customer/1/nodes/2.
func (r *CustomersDeploymentsService) List(parent string) *CustomersDeploymentsListCall {
	c := &CustomersDeploymentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have the following format: "DIRECT_CHILDREN" or
// format: "direct_children". The filter is case insensitive. If empty,
// then no deployments are filtered.
func (c *CustomersDeploymentsListCall) Filter(filter string) *CustomersDeploymentsListCall {
	c.urlParams_.Set("filter", filter)
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of deployments to return in the response.
func (c *CustomersDeploymentsListCall) PageSize(pageSize int64) *CustomersDeploymentsListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": A pagination token
// returned from a previous call to ListDeployments that indicates where
// this listing should continue from.
func (c *CustomersDeploymentsListCall) PageToken(pageToken string) *CustomersDeploymentsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDeploymentsListCall) Fields(s ...googleapi.Field) *CustomersDeploymentsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *CustomersDeploymentsListCall) IfNoneMatch(entityTag string) *CustomersDeploymentsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDeploymentsListCall) Context(ctx context.Context) *CustomersDeploymentsListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDeploymentsListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest constructs and sends the HTTP request for this call,
// returning the raw response.
func (c *CustomersDeploymentsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/deployments")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.deployments.list" call.
// Exactly one of *SasPortalListDeploymentsResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalListDeploymentsResponse.ServerResponse.Header or (if
// a response was returned at all) in error.(*googleapi.Error).Header.
// Use googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDeploymentsListCall) Do(opts ...googleapi.CallOption) (*SasPortalListDeploymentsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListDeploymentsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists deployments.",
	//   "flatPath": "v1alpha1/customers/{customersId}/deployments",
	//   "httpMethod": "GET",
	//   "id": "sasportal.customers.deployments.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have the following format: \"DIRECT_CHILDREN\" or format: \"direct_children\". The filter is case insensitive. If empty, then no deployments are filtered.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of deployments to return in the response.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListDeployments that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The parent resource name, for example, \"nodes/1\", customer/1/nodes/2.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/deployments",
	//   "response": {
	//     "$ref": "SasPortalListDeploymentsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *CustomersDeploymentsListCall) Pages(ctx context.Context, f func(*SasPortalListDeploymentsResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.customers.deployments.move":

type CustomersDeploymentsMoveCall struct {
	s                              *Service
	name                           string
	sasportalmovedeploymentrequest *SasPortalMoveDeploymentRequest
	urlParams_                     gensupport.URLParams
	ctx_                           context.Context
	header_                        http.Header
}

// Move: Moves a deployment under another node or customer.
//
// - name: The name of the deployment to move.
func (r *CustomersDeploymentsService) Move(name string, sasportalmovedeploymentrequest *SasPortalMoveDeploymentRequest) *CustomersDeploymentsMoveCall {
	c := &CustomersDeploymentsMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalmovedeploymentrequest = sasportalmovedeploymentrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDeploymentsMoveCall) Fields(s ...googleapi.Field) *CustomersDeploymentsMoveCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDeploymentsMoveCall) Context(ctx context.Context) *CustomersDeploymentsMoveCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDeploymentsMoveCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest constructs and sends the HTTP request for this call,
// returning the raw response.
func (c *CustomersDeploymentsMoveCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalmovedeploymentrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:move")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.deployments.move" call.
// Exactly one of *SasPortalOperation or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalOperation.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDeploymentsMoveCall) Do(opts ...googleapi.CallOption) (*SasPortalOperation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalOperation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Moves a deployment under another node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/deployments/{deploymentsId}:move",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.deployments.move",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the deployment to move.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:move",
	//   "request": {
	//     "$ref": "SasPortalMoveDeploymentRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalOperation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.deployments.patch":

type CustomersDeploymentsPatchCall struct {
	s                   *Service
	name                string
	sasportaldeployment *SasPortalDeployment
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}

// Patch: Updates an existing deployment.
//
// - name: Output only. Resource name.
func (r *CustomersDeploymentsService) Patch(name string, sasportaldeployment *SasPortalDeployment) *CustomersDeploymentsPatchCall {
	c := &CustomersDeploymentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportaldeployment = sasportaldeployment
	return c
}

// UpdateMask sets the optional parameter "updateMask": Fields to be
// updated.
func (c *CustomersDeploymentsPatchCall) UpdateMask(updateMask string) *CustomersDeploymentsPatchCall {
	c.urlParams_.Set("updateMask", updateMask)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDeploymentsPatchCall) Fields(s ...googleapi.Field) *CustomersDeploymentsPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDeploymentsPatchCall) Context(ctx context.Context) *CustomersDeploymentsPatchCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDeploymentsPatchCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest constructs and sends the HTTP request for this call,
// returning the raw response.
func (c *CustomersDeploymentsPatchCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldeployment)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.deployments.patch" call.
// Exactly one of *SasPortalDeployment or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalDeployment.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDeploymentsPatchCall) Do(opts ...googleapi.CallOption) (*SasPortalDeployment, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDeployment{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates an existing deployment.",
	//   "flatPath": "v1alpha1/customers/{customersId}/deployments/{deploymentsId}",
	//   "httpMethod": "PATCH",
	//   "id": "sasportal.customers.deployments.patch",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Output only. Resource name.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "updateMask": {
	//       "description": "Fields to be updated.",
	//       "format": "google-fieldmask",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "request": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.deployments.devices.create":

type CustomersDeploymentsDevicesCreateCall struct {
	s               *Service
	parent          string
	sasportaldevice *SasPortalDevice
	urlParams_      gensupport.URLParams
	ctx_            context.Context
	header_         http.Header
}

// Create: Creates a device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *CustomersDeploymentsDevicesService) Create(parent string, sasportaldevice *SasPortalDevice) *CustomersDeploymentsDevicesCreateCall {
	c := &CustomersDeploymentsDevicesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportaldevice = sasportaldevice
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDeploymentsDevicesCreateCall) Fields(s ...googleapi.Field) *CustomersDeploymentsDevicesCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDeploymentsDevicesCreateCall) Context(ctx context.Context) *CustomersDeploymentsDevicesCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDeploymentsDevicesCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest constructs and sends the HTTP request for this call,
// returning the raw response.
func (c *CustomersDeploymentsDevicesCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldevice)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.deployments.devices.create" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDeploymentsDevicesCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a device under a node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/deployments/{deploymentsId}/devices",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.deployments.devices.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "request": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.deployments.devices.createSigned":

type CustomersDeploymentsDevicesCreateSignedCall struct {
	s                                  *Service
	parent                             string
	sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest
	urlParams_                         gensupport.URLParams
	ctx_                               context.Context
	header_                            http.Header
}

// CreateSigned: Creates a signed device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *CustomersDeploymentsDevicesService) CreateSigned(parent string, sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest) *CustomersDeploymentsDevicesCreateSignedCall {
	c := &CustomersDeploymentsDevicesCreateSignedCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportalcreatesigneddevicerequest = sasportalcreatesigneddevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDeploymentsDevicesCreateSignedCall) Fields(s ...googleapi.Field) *CustomersDeploymentsDevicesCreateSignedCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDeploymentsDevicesCreateSignedCall) Context(ctx context.Context) *CustomersDeploymentsDevicesCreateSignedCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDeploymentsDevicesCreateSignedCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest constructs and sends the HTTP request for this call,
// returning the raw response.
func (c *CustomersDeploymentsDevicesCreateSignedCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalcreatesigneddevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices:createSigned")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.deployments.devices.createSigned" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDeploymentsDevicesCreateSignedCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a signed device under a node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/deployments/{deploymentsId}/devices:createSigned",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.deployments.devices.createSigned",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices:createSigned",
	//   "request": {
	//     "$ref": "SasPortalCreateSignedDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.deployments.devices.list":

type CustomersDeploymentsDevicesListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists devices under a node or customer.
//
// - parent: The name of the parent resource.
func (r *CustomersDeploymentsDevicesService) List(parent string) *CustomersDeploymentsDevicesListCall {
	c := &CustomersDeploymentsDevicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have one of the following formats: "sn=123454" or // "display_name=MyDevice". sn corresponds to serial number of the // device. The filter is case insensitive. func (c *CustomersDeploymentsDevicesListCall) Filter(filter string) *CustomersDeploymentsDevicesListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The maximum number // of devices to return in the response. If empty or zero, all devices // will be listed. Must be in the range [0, 1000]. func (c *CustomersDeploymentsDevicesListCall) PageSize(pageSize int64) *CustomersDeploymentsDevicesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A pagination token // returned from a previous call to ListDevices that indicates where // this listing should continue from. func (c *CustomersDeploymentsDevicesListCall) PageToken(pageToken string) *CustomersDeploymentsDevicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *CustomersDeploymentsDevicesListCall) Fields(s ...googleapi.Field) *CustomersDeploymentsDevicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *CustomersDeploymentsDevicesListCall) IfNoneMatch(entityTag string) *CustomersDeploymentsDevicesListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *CustomersDeploymentsDevicesListCall) Context(ctx context.Context) *CustomersDeploymentsDevicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *CustomersDeploymentsDevicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *CustomersDeploymentsDevicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.customers.deployments.devices.list" call. // Exactly one of *SasPortalListDevicesResponse or error will be // non-nil. Any non-2xx status code is an error. Response headers are in // either *SasPortalListDevicesResponse.ServerResponse.Header or (if a // response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *CustomersDeploymentsDevicesListCall) Do(opts ...googleapi.CallOption) (*SasPortalListDevicesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &SasPortalListDevicesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Lists devices under a node or customer.", // "flatPath": "v1alpha1/customers/{customersId}/deployments/{deploymentsId}/devices", // "httpMethod": "GET", // "id": "sasportal.customers.deployments.devices.list", // "parameterOrder": [ // "parent" // ], // "parameters": { // "filter": { // "description": "The filter expression. The filter should have one of the following formats: \"sn=123454\" or \"display_name=MyDevice\". sn corresponds to serial number of the device. The filter is case insensitive.", // "location": "query", // "type": "string" // }, // "pageSize": { // "description": "The maximum number of devices to return in the response. If empty or zero, all devices will be listed. Must be in the range [0, 1000].", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "A pagination token returned from a previous call to ListDevices that indicates where this listing should continue from.", // "location": "query", // "type": "string" // }, // "parent": { // "description": "Required. 
The name of the parent resource.", // "location": "path", // "pattern": "^customers/[^/]+/deployments/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1alpha1/{+parent}/devices", // "response": { // "$ref": "SasPortalListDevicesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/userinfo.email" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *CustomersDeploymentsDevicesListCall) Pages(ctx context.Context, f func(*SasPortalListDevicesResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "sasportal.customers.devices.create": type CustomersDevicesCreateCall struct { s *Service parent string sasportaldevice *SasPortalDevice urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Create: Creates a device under a node or customer. // // - parent: The name of the parent resource. func (r *CustomersDevicesService) Create(parent string, sasportaldevice *SasPortalDevice) *CustomersDevicesCreateCall { c := &CustomersDevicesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent c.sasportaldevice = sasportaldevice return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *CustomersDevicesCreateCall) Fields(s ...googleapi.Field) *CustomersDevicesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
func (c *CustomersDevicesCreateCall) Context(ctx context.Context) *CustomersDevicesCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDevicesCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the HTTP request for this call (JSON body, headers,
// URL parameters, path expansion) and sends it via gensupport.SendRequest.
func (c *CustomersDevicesCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldevice)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.devices.create" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDevicesCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method:
	// {
	//   "description": "Creates a device under a node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/devices",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.devices.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "request": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.customers.devices.createSigned":

type CustomersDevicesCreateSignedCall struct {
	s                                  *Service
	parent                             string
	sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest
	urlParams_                         gensupport.URLParams
	ctx_                               context.Context
	header_                            http.Header
}

// CreateSigned: Creates a signed device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *CustomersDevicesService) CreateSigned(parent string, sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest) *CustomersDevicesCreateSignedCall {
	c := &CustomersDevicesCreateSignedCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportalcreatesigneddevicerequest = sasportalcreatesigneddevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDevicesCreateSignedCall) Fields(s ...googleapi.Field) *CustomersDevicesCreateSignedCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDevicesCreateSignedCall) Context(ctx context.Context) *CustomersDevicesCreateSignedCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDevicesCreateSignedCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the HTTP request for this call (JSON body, headers,
// URL parameters, path expansion) and sends it via gensupport.SendRequest.
func (c *CustomersDevicesCreateSignedCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalcreatesigneddevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices:createSigned")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.devices.createSigned" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDevicesCreateSignedCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method:
	// {
	//   "description": "Creates a signed device under a node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/devices:createSigned",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.devices.createSigned",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices:createSigned",
	//   "request": {
	//     "$ref": "SasPortalCreateSignedDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.customers.devices.delete":

type CustomersDevicesDeleteCall struct {
	s          *Service
	name       string
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}

// Delete: Deletes a device.
//
// - name: The name of the device.
func (r *CustomersDevicesService) Delete(name string) *CustomersDevicesDeleteCall {
	c := &CustomersDevicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDevicesDeleteCall) Fields(s ...googleapi.Field) *CustomersDevicesDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDevicesDeleteCall) Context(ctx context.Context) *CustomersDevicesDeleteCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDevicesDeleteCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the HTTP request for this call (headers, URL
// parameters, path expansion) and sends it via gensupport.SendRequest.
func (c *CustomersDevicesDeleteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("DELETE", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.devices.delete" call.
// Exactly one of *SasPortalEmpty or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalEmpty.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDevicesDeleteCall) Do(opts ...googleapi.CallOption) (*SasPortalEmpty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalEmpty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method:
	// {
	//   "description": "Deletes a device.",
	//   "flatPath": "v1alpha1/customers/{customersId}/devices/{devicesId}",
	//   "httpMethod": "DELETE",
	//   "id": "sasportal.customers.devices.delete",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalEmpty"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.customers.devices.get":

type CustomersDevicesGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Gets details about a device.
//
// - name: The name of the device.
func (r *CustomersDevicesService) Get(name string) *CustomersDevicesGetCall {
	c := &CustomersDevicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDevicesGetCall) Fields(s ...googleapi.Field) *CustomersDevicesGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *CustomersDevicesGetCall) IfNoneMatch(entityTag string) *CustomersDevicesGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDevicesGetCall) Context(ctx context.Context) *CustomersDevicesGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDevicesGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the HTTP request for this call (headers including
// optional If-None-Match, URL parameters, path expansion) and sends it
// via gensupport.SendRequest.
func (c *CustomersDevicesGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.devices.get" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDevicesGetCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method:
	// {
	//   "description": "Gets details about a device.",
	//   "flatPath": "v1alpha1/customers/{customersId}/devices/{devicesId}",
	//   "httpMethod": "GET",
	//   "id": "sasportal.customers.devices.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.customers.devices.list":

type CustomersDevicesListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists devices under a node or customer.
//
// - parent: The name of the parent resource.
func (r *CustomersDevicesService) List(parent string) *CustomersDevicesListCall {
	c := &CustomersDevicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have one of the following formats: "sn=123454" or
// "display_name=MyDevice". sn corresponds to serial number of the
// device. The filter is case insensitive.
func (c *CustomersDevicesListCall) Filter(filter string) *CustomersDevicesListCall {
	c.urlParams_.Set("filter", filter)
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of devices to return in the response. If empty or zero, all devices
// will be listed. Must be in the range [0, 1000].
func (c *CustomersDevicesListCall) PageSize(pageSize int64) *CustomersDevicesListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": A pagination token
// returned from a previous call to ListDevices that indicates where
// this listing should continue from.
func (c *CustomersDevicesListCall) PageToken(pageToken string) *CustomersDevicesListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDevicesListCall) Fields(s ...googleapi.Field) *CustomersDevicesListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *CustomersDevicesListCall) IfNoneMatch(entityTag string) *CustomersDevicesListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDevicesListCall) Context(ctx context.Context) *CustomersDevicesListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDevicesListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the HTTP request for this call (headers including
// optional If-None-Match, URL parameters, path expansion) and sends it
// via gensupport.SendRequest.
func (c *CustomersDevicesListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.devices.list" call.
// Exactly one of *SasPortalListDevicesResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalListDevicesResponse.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDevicesListCall) Do(opts ...googleapi.CallOption) (*SasPortalListDevicesResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListDevicesResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method:
	// {
	//   "description": "Lists devices under a node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/devices",
	//   "httpMethod": "GET",
	//   "id": "sasportal.customers.devices.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have one of the following formats: \"sn=123454\" or \"display_name=MyDevice\". sn corresponds to serial number of the device. The filter is case insensitive.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of devices to return in the response. If empty or zero, all devices will be listed. Must be in the range [0, 1000].",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListDevices that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "response": {
	//     "$ref": "SasPortalListDevicesResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *CustomersDevicesListCall) Pages(ctx context.Context, f func(*SasPortalListDevicesResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.customers.devices.move":

type CustomersDevicesMoveCall struct {
	s                          *Service
	name                       string
	sasportalmovedevicerequest *SasPortalMoveDeviceRequest
	urlParams_                 gensupport.URLParams
	ctx_                       context.Context
	header_                    http.Header
}

// Move: Moves a device under another node or customer.
//
// - name: The name of the device to move.
func (r *CustomersDevicesService) Move(name string, sasportalmovedevicerequest *SasPortalMoveDeviceRequest) *CustomersDevicesMoveCall {
	c := &CustomersDevicesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalmovedevicerequest = sasportalmovedevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersDevicesMoveCall) Fields(s ...googleapi.Field) *CustomersDevicesMoveCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersDevicesMoveCall) Context(ctx context.Context) *CustomersDevicesMoveCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersDevicesMoveCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the HTTP request for this call (JSON body, headers,
// URL parameters, path expansion) and sends it via gensupport.SendRequest.
func (c *CustomersDevicesMoveCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalmovedevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:move")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.devices.move" call.
// Exactly one of *SasPortalOperation or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalOperation.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersDevicesMoveCall) Do(opts ...googleapi.CallOption) (*SasPortalOperation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalOperation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method:
	// {
	//   "description": "Moves a device under another node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/devices/{devicesId}:move",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.devices.move",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device to move.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:move",
	//   "request": {
	//     "$ref": "SasPortalMoveDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalOperation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.customers.devices.patch":

type CustomersDevicesPatchCall struct {
	s               *Service
	name            string
	sasportaldevice *SasPortalDevice
	urlParams_      gensupport.URLParams
	ctx_            context.Context
	header_         http.Header
}

// Patch: Updates a device.
//
// - name: Output only. The resource path name.
func (r *CustomersDevicesService) Patch(name string, sasportaldevice *SasPortalDevice) *CustomersDevicesPatchCall {
	c := &CustomersDevicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportaldevice = sasportaldevice
	return c
}

// UpdateMask sets the optional parameter "updateMask": Fields to be
// updated.
func (c *CustomersDevicesPatchCall) UpdateMask(updateMask string) *CustomersDevicesPatchCall { c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *CustomersDevicesPatchCall) Fields(s ...googleapi.Field) *CustomersDevicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *CustomersDevicesPatchCall) Context(ctx context.Context) *CustomersDevicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *CustomersDevicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *CustomersDevicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldevice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.customers.devices.patch" call. // Exactly one of *SasPortalDevice or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *SasPortalDevice.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *CustomersDevicesPatchCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &SasPortalDevice{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Updates a device.", // "flatPath": "v1alpha1/customers/{customersId}/devices/{devicesId}", // "httpMethod": "PATCH", // "id": "sasportal.customers.devices.patch", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Output only. 
The resource path name.", // "location": "path", // "pattern": "^customers/[^/]+/devices/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { // "description": "Fields to be updated.", // "format": "google-fieldmask", // "location": "query", // "type": "string" // } // }, // "path": "v1alpha1/{+name}", // "request": { // "$ref": "SasPortalDevice" // }, // "response": { // "$ref": "SasPortalDevice" // }, // "scopes": [ // "https://www.googleapis.com/auth/userinfo.email" // ] // } } // method id "sasportal.customers.devices.signDevice": type CustomersDevicesSignDeviceCall struct { s *Service name string sasportalsigndevicerequest *SasPortalSignDeviceRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // SignDevice: Signs a device. // // - name: Output only. The resource path name. func (r *CustomersDevicesService) SignDevice(name string, sasportalsigndevicerequest *SasPortalSignDeviceRequest) *CustomersDevicesSignDeviceCall { c := &CustomersDevicesSignDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.sasportalsigndevicerequest = sasportalsigndevicerequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *CustomersDevicesSignDeviceCall) Fields(s ...googleapi.Field) *CustomersDevicesSignDeviceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *CustomersDevicesSignDeviceCall) Context(ctx context.Context) *CustomersDevicesSignDeviceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *CustomersDevicesSignDeviceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *CustomersDevicesSignDeviceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalsigndevicerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:signDevice") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.customers.devices.signDevice" call. // Exactly one of *SasPortalEmpty or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *SasPortalEmpty.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *CustomersDevicesSignDeviceCall) Do(opts ...googleapi.CallOption) (*SasPortalEmpty, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &SasPortalEmpty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Signs a device.", // "flatPath": "v1alpha1/customers/{customersId}/devices/{devicesId}:signDevice", // "httpMethod": "POST", // "id": "sasportal.customers.devices.signDevice", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Output only. The resource path name.", // "location": "path", // "pattern": "^customers/[^/]+/devices/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1alpha1/{+name}:signDevice", // "request": { // "$ref": "SasPortalSignDeviceRequest" // }, // "response": { // "$ref": "SasPortalEmpty" // }, // "scopes": [ // "https://www.googleapis.com/auth/userinfo.email" // ] // } } // method id "sasportal.customers.devices.updateSigned": type CustomersDevicesUpdateSignedCall struct { s *Service name string sasportalupdatesigneddevicerequest *SasPortalUpdateSignedDeviceRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // UpdateSigned: Updates a signed device. // // - name: The name of the device to update. 
func (r *CustomersDevicesService) UpdateSigned(name string, sasportalupdatesigneddevicerequest *SasPortalUpdateSignedDeviceRequest) *CustomersDevicesUpdateSignedCall { c := &CustomersDevicesUpdateSignedCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.sasportalupdatesigneddevicerequest = sasportalupdatesigneddevicerequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *CustomersDevicesUpdateSignedCall) Fields(s ...googleapi.Field) *CustomersDevicesUpdateSignedCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *CustomersDevicesUpdateSignedCall) Context(ctx context.Context) *CustomersDevicesUpdateSignedCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *CustomersDevicesUpdateSignedCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *CustomersDevicesUpdateSignedCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalupdatesigneddevicerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:updateSigned") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.customers.devices.updateSigned" call. // Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *SasPortalDevice.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *CustomersDevicesUpdateSignedCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &SasPortalDevice{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Updates a signed device.", // "flatPath": "v1alpha1/customers/{customersId}/devices/{devicesId}:updateSigned", // "httpMethod": "PATCH", // "id": "sasportal.customers.devices.updateSigned", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Required. 
The name of the device to update.", // "location": "path", // "pattern": "^customers/[^/]+/devices/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1alpha1/{+name}:updateSigned", // "request": { // "$ref": "SasPortalUpdateSignedDeviceRequest" // }, // "response": { // "$ref": "SasPortalDevice" // }, // "scopes": [ // "https://www.googleapis.com/auth/userinfo.email" // ] // } } // method id "sasportal.customers.nodes.create": type CustomersNodesCreateCall struct { s *Service parent string sasportalnode *SasPortalNode urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Create: Creates a new node. // // - parent: The parent resource name where the node is to be created. func (r *CustomersNodesService) Create(parent string, sasportalnode *SasPortalNode) *CustomersNodesCreateCall { c := &CustomersNodesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent c.sasportalnode = sasportalnode return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *CustomersNodesCreateCall) Fields(s ...googleapi.Field) *CustomersNodesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *CustomersNodesCreateCall) Context(ctx context.Context) *CustomersNodesCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *CustomersNodesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *CustomersNodesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalnode) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/nodes") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.customers.nodes.create" call. // Exactly one of *SasPortalNode or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *SasPortalNode.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *CustomersNodesCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalNode, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &SasPortalNode{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Creates a new node.", // "flatPath": "v1alpha1/customers/{customersId}/nodes", // "httpMethod": "POST", // "id": "sasportal.customers.nodes.create", // "parameterOrder": [ // "parent" // ], // "parameters": { // "parent": { // "description": "Required. The parent resource name where the node is to be created.", // "location": "path", // "pattern": "^customers/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1alpha1/{+parent}/nodes", // "request": { // "$ref": "SasPortalNode" // }, // "response": { // "$ref": "SasPortalNode" // }, // "scopes": [ // "https://www.googleapis.com/auth/userinfo.email" // ] // } } // method id "sasportal.customers.nodes.delete": type CustomersNodesDeleteCall struct { s *Service name string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Delete: Deletes a node. // // - name: The name of the node. func (r *CustomersNodesService) Delete(name string) *CustomersNodesDeleteCall { c := &CustomersNodesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *CustomersNodesDeleteCall) Fields(s ...googleapi.Field) *CustomersNodesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *CustomersNodesDeleteCall) Context(ctx context.Context) *CustomersNodesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *CustomersNodesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *CustomersNodesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.customers.nodes.delete" call. // Exactly one of *SasPortalEmpty or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *SasPortalEmpty.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *CustomersNodesDeleteCall) Do(opts ...googleapi.CallOption) (*SasPortalEmpty, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &SasPortalEmpty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Deletes a node.", // "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}", // "httpMethod": "DELETE", // "id": "sasportal.customers.nodes.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Required. The name of the node.", // "location": "path", // "pattern": "^customers/[^/]+/nodes/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1alpha1/{+name}", // "response": { // "$ref": "SasPortalEmpty" // }, // "scopes": [ // "https://www.googleapis.com/auth/userinfo.email" // ] // } } // method id "sasportal.customers.nodes.get": type CustomersNodesGetCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Returns a requested node. // // - name: The name of the node. func (r *CustomersNodesService) Get(name string) *CustomersNodesGetCall { c := &CustomersNodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *CustomersNodesGetCall) Fields(s ...googleapi.Field) *CustomersNodesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *CustomersNodesGetCall) IfNoneMatch(entityTag string) *CustomersNodesGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *CustomersNodesGetCall) Context(ctx context.Context) *CustomersNodesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *CustomersNodesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *CustomersNodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.customers.nodes.get" call. 
// Exactly one of *SasPortalNode or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *SasPortalNode.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *CustomersNodesGetCall) Do(opts ...googleapi.CallOption) (*SasPortalNode, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &SasPortalNode{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Returns a requested node.", // "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}", // "httpMethod": "GET", // "id": "sasportal.customers.nodes.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Required. The name of the node.", // "location": "path", // "pattern": "^customers/[^/]+/nodes/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1alpha1/{+name}", // "response": { // "$ref": "SasPortalNode" // }, // "scopes": [ // "https://www.googleapis.com/auth/userinfo.email" // ] // } } // method id "sasportal.customers.nodes.list": type CustomersNodesListCall struct { s *Service parent string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Lists nodes. // // - parent: The parent resource name, for example, "nodes/1". 
func (r *CustomersNodesService) List(parent string) *CustomersNodesListCall { c := &CustomersNodesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } // Filter sets the optional parameter "filter": The filter expression. // The filter should have the following format: "DIRECT_CHILDREN" or // format: "direct_children". The filter is case insensitive. If empty, // then no nodes are filtered. func (c *CustomersNodesListCall) Filter(filter string) *CustomersNodesListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The maximum number // of nodes to return in the response. func (c *CustomersNodesListCall) PageSize(pageSize int64) *CustomersNodesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A pagination token // returned from a previous call to ListNodes that indicates where this // listing should continue from. func (c *CustomersNodesListCall) PageToken(pageToken string) *CustomersNodesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *CustomersNodesListCall) Fields(s ...googleapi.Field) *CustomersNodesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *CustomersNodesListCall) IfNoneMatch(entityTag string) *CustomersNodesListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. 
// Any pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersNodesListCall) Context(ctx context.Context) *CustomersNodesListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersNodesListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP GET request for the nodes.list
// call: caller headers, standard alt/prettyPrint params, and the
// {+parent} path expansion.
func (c *CustomersNodesListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/nodes")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.nodes.list" call.
// Exactly one of *SasPortalListNodesResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *SasPortalListNodesResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersNodesListCall) Do(opts ...googleapi.CallOption) (*SasPortalListNodesResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListNodesResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists nodes.",
	//   "flatPath": "v1alpha1/customers/{customersId}/nodes",
	//   "httpMethod": "GET",
	//   "id": "sasportal.customers.nodes.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have the following format: \"DIRECT_CHILDREN\" or format: \"direct_children\". The filter is case insensitive. If empty, then no nodes are filtered.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of nodes to return in the response.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListNodes that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The parent resource name, for example, \"nodes/1\".",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/nodes",
	//   "response": {
	//     "$ref": "SasPortalListNodesResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *CustomersNodesListCall) Pages(ctx context.Context, f func(*SasPortalListNodesResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.customers.nodes.move":

type CustomersNodesMoveCall struct {
	s                        *Service
	name                     string
	sasportalmovenoderequest *SasPortalMoveNodeRequest
	urlParams_               gensupport.URLParams
	ctx_                     context.Context
	header_                  http.Header
}

// Move: Moves a node under another node or customer.
//
// - name: The name of the node to move.
func (r *CustomersNodesService) Move(name string, sasportalmovenoderequest *SasPortalMoveNodeRequest) *CustomersNodesMoveCall {
	c := &CustomersNodesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalmovenoderequest = sasportalmovenoderequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersNodesMoveCall) Fields(s ...googleapi.Field) *CustomersNodesMoveCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersNodesMoveCall) Context(ctx context.Context) *CustomersNodesMoveCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersNodesMoveCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP POST request for the nodes.move
// call, JSON-encoding the SasPortalMoveNodeRequest body.
func (c *CustomersNodesMoveCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalmovenoderequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:move")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.nodes.move" call.
// Exactly one of *SasPortalOperation or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalOperation.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersNodesMoveCall) Do(opts ...googleapi.CallOption) (*SasPortalOperation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalOperation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Moves a node under another node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}:move",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.nodes.move",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the node to move.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:move",
	//   "request": {
	//     "$ref": "SasPortalMoveNodeRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalOperation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.nodes.patch":

type CustomersNodesPatchCall struct {
	s             *Service
	name          string
	sasportalnode *SasPortalNode
	urlParams_    gensupport.URLParams
	ctx_          context.Context
	header_       http.Header
}

// Patch: Updates an existing node.
//
// - name: Output only. Resource name.
func (r *CustomersNodesService) Patch(name string, sasportalnode *SasPortalNode) *CustomersNodesPatchCall {
	c := &CustomersNodesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalnode = sasportalnode
	return c
}

// UpdateMask sets the optional parameter "updateMask": Fields to be
// updated.
func (c *CustomersNodesPatchCall) UpdateMask(updateMask string) *CustomersNodesPatchCall {
	c.urlParams_.Set("updateMask", updateMask)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersNodesPatchCall) Fields(s ...googleapi.Field) *CustomersNodesPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersNodesPatchCall) Context(ctx context.Context) *CustomersNodesPatchCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersNodesPatchCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP PATCH request for the nodes.patch
// call, JSON-encoding the SasPortalNode body.
func (c *CustomersNodesPatchCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalnode)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.nodes.patch" call.
// Exactly one of *SasPortalNode or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalNode.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersNodesPatchCall) Do(opts ...googleapi.CallOption) (*SasPortalNode, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalNode{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates an existing node.",
	//   "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}",
	//   "httpMethod": "PATCH",
	//   "id": "sasportal.customers.nodes.patch",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Output only. Resource name.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "updateMask": {
	//       "description": "Fields to be updated.",
	//       "format": "google-fieldmask",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "request": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "response": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.nodes.deployments.create":

type CustomersNodesDeploymentsCreateCall struct {
	s                   *Service
	parent              string
	sasportaldeployment *SasPortalDeployment
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}

// Create: Creates a new deployment.
//
// - parent: The parent resource name where the deployment is to be
//   created.
func (r *CustomersNodesDeploymentsService) Create(parent string, sasportaldeployment *SasPortalDeployment) *CustomersNodesDeploymentsCreateCall {
	c := &CustomersNodesDeploymentsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportaldeployment = sasportaldeployment
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersNodesDeploymentsCreateCall) Fields(s ...googleapi.Field) *CustomersNodesDeploymentsCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersNodesDeploymentsCreateCall) Context(ctx context.Context) *CustomersNodesDeploymentsCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersNodesDeploymentsCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP POST request for the
// deployments.create call, JSON-encoding the SasPortalDeployment body.
func (c *CustomersNodesDeploymentsCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldeployment)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/deployments")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.nodes.deployments.create" call.
// Exactly one of *SasPortalDeployment or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalDeployment.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersNodesDeploymentsCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalDeployment, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDeployment{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new deployment.",
	//   "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}/deployments",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.nodes.deployments.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The parent resource name where the deployment is to be created.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/deployments",
	//   "request": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.nodes.deployments.list":

type CustomersNodesDeploymentsListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists deployments.
//
// - parent: The parent resource name, for example, "nodes/1",
//   customer/1/nodes/2.
func (r *CustomersNodesDeploymentsService) List(parent string) *CustomersNodesDeploymentsListCall {
	c := &CustomersNodesDeploymentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have the following format: "DIRECT_CHILDREN" or
// format: "direct_children". The filter is case insensitive. If empty,
// then no deployments are filtered.
func (c *CustomersNodesDeploymentsListCall) Filter(filter string) *CustomersNodesDeploymentsListCall {
	c.urlParams_.Set("filter", filter)
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of deployments to return in the response.
func (c *CustomersNodesDeploymentsListCall) PageSize(pageSize int64) *CustomersNodesDeploymentsListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": A pagination token
// returned from a previous call to ListDeployments that indicates where
// this listing should continue from.
func (c *CustomersNodesDeploymentsListCall) PageToken(pageToken string) *CustomersNodesDeploymentsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersNodesDeploymentsListCall) Fields(s ...googleapi.Field) *CustomersNodesDeploymentsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *CustomersNodesDeploymentsListCall) IfNoneMatch(entityTag string) *CustomersNodesDeploymentsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersNodesDeploymentsListCall) Context(ctx context.Context) *CustomersNodesDeploymentsListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersNodesDeploymentsListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP GET request for the
// deployments.list call, honoring any If-None-Match entity tag.
func (c *CustomersNodesDeploymentsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/deployments")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.nodes.deployments.list" call.
// Exactly one of *SasPortalListDeploymentsResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalListDeploymentsResponse.ServerResponse.Header or (if
// a response was returned at all) in error.(*googleapi.Error).Header.
// Use googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersNodesDeploymentsListCall) Do(opts ...googleapi.CallOption) (*SasPortalListDeploymentsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListDeploymentsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists deployments.",
	//   "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}/deployments",
	//   "httpMethod": "GET",
	//   "id": "sasportal.customers.nodes.deployments.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have the following format: \"DIRECT_CHILDREN\" or format: \"direct_children\". The filter is case insensitive. If empty, then no deployments are filtered.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of deployments to return in the response.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListDeployments that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The parent resource name, for example, \"nodes/1\", customer/1/nodes/2.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/deployments",
	//   "response": {
	//     "$ref": "SasPortalListDeploymentsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *CustomersNodesDeploymentsListCall) Pages(ctx context.Context, f func(*SasPortalListDeploymentsResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.customers.nodes.devices.create":

type CustomersNodesDevicesCreateCall struct {
	s               *Service
	parent          string
	sasportaldevice *SasPortalDevice
	urlParams_      gensupport.URLParams
	ctx_            context.Context
	header_         http.Header
}

// Create: Creates a device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *CustomersNodesDevicesService) Create(parent string, sasportaldevice *SasPortalDevice) *CustomersNodesDevicesCreateCall {
	c := &CustomersNodesDevicesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportaldevice = sasportaldevice
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersNodesDevicesCreateCall) Fields(s ...googleapi.Field) *CustomersNodesDevicesCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method.
// Any pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersNodesDevicesCreateCall) Context(ctx context.Context) *CustomersNodesDevicesCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersNodesDevicesCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP POST request for the
// devices.create call, JSON-encoding the SasPortalDevice body.
func (c *CustomersNodesDevicesCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldevice)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.nodes.devices.create" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersNodesDevicesCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a device under a node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}/devices",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.nodes.devices.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "request": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.nodes.devices.createSigned":

type CustomersNodesDevicesCreateSignedCall struct {
	s                                  *Service
	parent                             string
	sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest
	urlParams_                         gensupport.URLParams
	ctx_                               context.Context
	header_                            http.Header
}

// CreateSigned: Creates a signed device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *CustomersNodesDevicesService) CreateSigned(parent string, sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest) *CustomersNodesDevicesCreateSignedCall {
	c := &CustomersNodesDevicesCreateSignedCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportalcreatesigneddevicerequest = sasportalcreatesigneddevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersNodesDevicesCreateSignedCall) Fields(s ...googleapi.Field) *CustomersNodesDevicesCreateSignedCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersNodesDevicesCreateSignedCall) Context(ctx context.Context) *CustomersNodesDevicesCreateSignedCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersNodesDevicesCreateSignedCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP POST request for the
// devices.createSigned call, JSON-encoding the
// SasPortalCreateSignedDeviceRequest body.
func (c *CustomersNodesDevicesCreateSignedCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalcreatesigneddevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices:createSigned")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.nodes.devices.createSigned" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersNodesDevicesCreateSignedCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a signed device under a node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}/devices:createSigned",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.nodes.devices.createSigned",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices:createSigned",
	//   "request": {
	//     "$ref": "SasPortalCreateSignedDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.nodes.devices.list":

type CustomersNodesDevicesListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists devices under a node or customer.
//
// - parent: The name of the parent resource.
func (r *CustomersNodesDevicesService) List(parent string) *CustomersNodesDevicesListCall {
	c := &CustomersNodesDevicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have one of the following formats: "sn=123454" or
// "display_name=MyDevice". sn corresponds to serial number of the
// device. The filter is case insensitive.
func (c *CustomersNodesDevicesListCall) Filter(filter string) *CustomersNodesDevicesListCall {
	c.urlParams_.Set("filter", filter)
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of devices to return in the response. If empty or zero, all devices
// will be listed. Must be in the range [0, 1000].
func (c *CustomersNodesDevicesListCall) PageSize(pageSize int64) *CustomersNodesDevicesListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": A pagination token
// returned from a previous call to ListDevices that indicates where
// this listing should continue from.
func (c *CustomersNodesDevicesListCall) PageToken(pageToken string) *CustomersNodesDevicesListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersNodesDevicesListCall) Fields(s ...googleapi.Field) *CustomersNodesDevicesListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *CustomersNodesDevicesListCall) IfNoneMatch(entityTag string) *CustomersNodesDevicesListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersNodesDevicesListCall) Context(ctx context.Context) *CustomersNodesDevicesListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersNodesDevicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *CustomersNodesDevicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.customers.nodes.devices.list" call. // Exactly one of *SasPortalListDevicesResponse or error will be // non-nil. Any non-2xx status code is an error. Response headers are in // either *SasPortalListDevicesResponse.ServerResponse.Header or (if a // response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *CustomersNodesDevicesListCall) Do(opts ...googleapi.CallOption) (*SasPortalListDevicesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error so callers can
	// detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListDevicesResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists devices under a node or customer.",
	//   "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}/devices",
	//   "httpMethod": "GET",
	//   "id": "sasportal.customers.nodes.devices.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have one of the following formats: \"sn=123454\" or \"display_name=MyDevice\". sn corresponds to serial number of the device. The filter is case insensitive.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of devices to return in the response. If empty or zero, all devices will be listed. Must be in the range [0, 1000].",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListDevices that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "response": {
	//     "$ref": "SasPortalListDevicesResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *CustomersNodesDevicesListCall) Pages(ctx context.Context, f func(*SasPortalListDevicesResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.customers.nodes.nodes.create":

// CustomersNodesNodesCreateCall holds the state for a
// sasportal.customers.nodes.nodes.create request; build it with Create
// and execute it with Do.
type CustomersNodesNodesCreateCall struct {
	s             *Service
	parent        string
	sasportalnode *SasPortalNode
	urlParams_    gensupport.URLParams
	ctx_          context.Context
	header_       http.Header
}

// Create: Creates a new node.
//
// - parent: The parent resource name where the node is to be created.
func (r *CustomersNodesNodesService) Create(parent string, sasportalnode *SasPortalNode) *CustomersNodesNodesCreateCall {
	c := &CustomersNodesNodesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportalnode = sasportalnode
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersNodesNodesCreateCall) Fields(s ...googleapi.Field) *CustomersNodesNodesCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersNodesNodesCreateCall) Context(ctx context.Context) *CustomersNodesNodesCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersNodesNodesCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest JSON-encodes the SasPortalNode request body and sends the
// HTTP POST request for the create call.
func (c *CustomersNodesNodesCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalnode)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/nodes")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.nodes.nodes.create" call.
// Exactly one of *SasPortalNode or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalNode.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersNodesNodesCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalNode, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error so callers can
	// detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalNode{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new node.",
	//   "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}/nodes",
	//   "httpMethod": "POST",
	//   "id": "sasportal.customers.nodes.nodes.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The parent resource name where the node is to be created.",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/nodes",
	//   "request": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "response": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.customers.nodes.nodes.list":

// CustomersNodesNodesListCall holds the state for a
// sasportal.customers.nodes.nodes.list request; build it with List and
// execute it with Do (or iterate all pages with Pages).
type CustomersNodesNodesListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists nodes.
//
// - parent: The parent resource name, for example, "nodes/1".
func (r *CustomersNodesNodesService) List(parent string) *CustomersNodesNodesListCall {
	c := &CustomersNodesNodesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have the following format: "DIRECT_CHILDREN" or
// format: "direct_children". The filter is case insensitive. If empty,
// then no nodes are filtered.
func (c *CustomersNodesNodesListCall) Filter(filter string) *CustomersNodesNodesListCall {
	c.urlParams_.Set("filter", filter)
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of nodes to return in the response.
func (c *CustomersNodesNodesListCall) PageSize(pageSize int64) *CustomersNodesNodesListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": A pagination token
// returned from a previous call to ListNodes that indicates where this
// listing should continue from.
func (c *CustomersNodesNodesListCall) PageToken(pageToken string) *CustomersNodesNodesListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *CustomersNodesNodesListCall) Fields(s ...googleapi.Field) *CustomersNodesNodesListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *CustomersNodesNodesListCall) IfNoneMatch(entityTag string) *CustomersNodesNodesListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *CustomersNodesNodesListCall) Context(ctx context.Context) *CustomersNodesNodesListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *CustomersNodesNodesListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP GET request for the list call,
// applying caller-supplied headers, the stored context, and (when set)
// the If-None-Match entity tag.
func (c *CustomersNodesNodesListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/nodes")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.customers.nodes.nodes.list" call.
// Exactly one of *SasPortalListNodesResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *SasPortalListNodesResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *CustomersNodesNodesListCall) Do(opts ...googleapi.CallOption) (*SasPortalListNodesResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListNodesResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists nodes.",
	//   "flatPath": "v1alpha1/customers/{customersId}/nodes/{nodesId}/nodes",
	//   "httpMethod": "GET",
	//   "id": "sasportal.customers.nodes.nodes.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have the following format: \"DIRECT_CHILDREN\" or format: \"direct_children\". The filter is case insensitive. If empty, then no nodes are filtered.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of nodes to return in the response.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListNodes that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The parent resource name, for example, \"nodes/1\".",
	//       "location": "path",
	//       "pattern": "^customers/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/nodes",
	//   "response": {
	//     "$ref": "SasPortalListNodesResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *CustomersNodesNodesListCall) Pages(ctx context.Context, f func(*SasPortalListNodesResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.deployments.get":

// DeploymentsGetCall holds the state for a sasportal.deployments.get
// request; build it with Get and execute it with Do.
type DeploymentsGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Returns a requested deployment.
//
// - name: The name of the deployment.
func (r *DeploymentsService) Get(name string) *DeploymentsGetCall {
	c := &DeploymentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DeploymentsGetCall) Fields(s ...googleapi.Field) *DeploymentsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *DeploymentsGetCall) IfNoneMatch(entityTag string) *DeploymentsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DeploymentsGetCall) Context(ctx context.Context) *DeploymentsGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DeploymentsGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP GET request for the get call,
// applying caller-supplied headers, the stored context, and (when set)
// the If-None-Match entity tag.
func (c *DeploymentsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.deployments.get" call.
// Exactly one of *SasPortalDeployment or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalDeployment.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DeploymentsGetCall) Do(opts ...googleapi.CallOption) (*SasPortalDeployment, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDeployment{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns a requested deployment.",
	//   "flatPath": "v1alpha1/deployments/{deploymentsId}",
	//   "httpMethod": "GET",
	//   "id": "sasportal.deployments.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the deployment.",
	//       "location": "path",
	//       "pattern": "^deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.deployments.devices.delete":

// DeploymentsDevicesDeleteCall holds the state for a
// sasportal.deployments.devices.delete request; build it with Delete
// and execute it with Do.
type DeploymentsDevicesDeleteCall struct {
	s          *Service
	name       string
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}

// Delete: Deletes a device.
//
// - name: The name of the device.
func (r *DeploymentsDevicesService) Delete(name string) *DeploymentsDevicesDeleteCall {
	c := &DeploymentsDevicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DeploymentsDevicesDeleteCall) Fields(s ...googleapi.Field) *DeploymentsDevicesDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DeploymentsDevicesDeleteCall) Context(ctx context.Context) *DeploymentsDevicesDeleteCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DeploymentsDevicesDeleteCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP DELETE request for the delete
// call, applying caller-supplied headers and the stored context.
func (c *DeploymentsDevicesDeleteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("DELETE", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.deployments.devices.delete" call.
// Exactly one of *SasPortalEmpty or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalEmpty.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DeploymentsDevicesDeleteCall) Do(opts ...googleapi.CallOption) (*SasPortalEmpty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalEmpty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Deletes a device.",
	//   "flatPath": "v1alpha1/deployments/{deploymentsId}/devices/{devicesId}",
	//   "httpMethod": "DELETE",
	//   "id": "sasportal.deployments.devices.delete",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device.",
	//       "location": "path",
	//       "pattern": "^deployments/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalEmpty"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.deployments.devices.get":

// DeploymentsDevicesGetCall holds the state for a
// sasportal.deployments.devices.get request; build it with Get and
// execute it with Do.
type DeploymentsDevicesGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Gets details about a device.
//
// - name: The name of the device.
func (r *DeploymentsDevicesService) Get(name string) *DeploymentsDevicesGetCall {
	c := &DeploymentsDevicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DeploymentsDevicesGetCall) Fields(s ...googleapi.Field) *DeploymentsDevicesGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *DeploymentsDevicesGetCall) IfNoneMatch(entityTag string) *DeploymentsDevicesGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DeploymentsDevicesGetCall) Context(ctx context.Context) *DeploymentsDevicesGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DeploymentsDevicesGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP GET request for the get call,
// applying caller-supplied headers, the stored context, and (when set)
// the If-None-Match entity tag.
func (c *DeploymentsDevicesGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.deployments.devices.get" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DeploymentsDevicesGetCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error so callers can
	// detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets details about a device.",
	//   "flatPath": "v1alpha1/deployments/{deploymentsId}/devices/{devicesId}",
	//   "httpMethod": "GET",
	//   "id": "sasportal.deployments.devices.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device.",
	//       "location": "path",
	//       "pattern": "^deployments/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.deployments.devices.move":

// DeploymentsDevicesMoveCall holds the state for a
// sasportal.deployments.devices.move request; build it with Move and
// execute it with Do.
type DeploymentsDevicesMoveCall struct {
	s                          *Service
	name                       string
	sasportalmovedevicerequest *SasPortalMoveDeviceRequest
	urlParams_                 gensupport.URLParams
	ctx_                       context.Context
	header_                    http.Header
}

// Move: Moves a device under another node or customer.
//
// - name: The name of the device to move.
func (r *DeploymentsDevicesService) Move(name string, sasportalmovedevicerequest *SasPortalMoveDeviceRequest) *DeploymentsDevicesMoveCall {
	c := &DeploymentsDevicesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalmovedevicerequest = sasportalmovedevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DeploymentsDevicesMoveCall) Fields(s ...googleapi.Field) *DeploymentsDevicesMoveCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DeploymentsDevicesMoveCall) Context(ctx context.Context) *DeploymentsDevicesMoveCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DeploymentsDevicesMoveCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest JSON-encodes the SasPortalMoveDeviceRequest body and sends
// the HTTP POST request for the move call.
func (c *DeploymentsDevicesMoveCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalmovedevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:move")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.deployments.devices.move" call.
// Exactly one of *SasPortalOperation or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalOperation.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DeploymentsDevicesMoveCall) Do(opts ...googleapi.CallOption) (*SasPortalOperation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalOperation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Moves a device under another node or customer.",
	//   "flatPath": "v1alpha1/deployments/{deploymentsId}/devices/{devicesId}:move",
	//   "httpMethod": "POST",
	//   "id": "sasportal.deployments.devices.move",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device to move.",
	//       "location": "path",
	//       "pattern": "^deployments/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:move",
	//   "request": {
	//     "$ref": "SasPortalMoveDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalOperation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.deployments.devices.patch":

// DeploymentsDevicesPatchCall holds the state for a
// sasportal.deployments.devices.patch request; build it with Patch and
// execute it with Do.
type DeploymentsDevicesPatchCall struct {
	s               *Service
	name            string
	sasportaldevice *SasPortalDevice
	urlParams_      gensupport.URLParams
	ctx_            context.Context
	header_         http.Header
}

// Patch: Updates a device.
//
// - name: Output only. The resource path name.
func (r *DeploymentsDevicesService) Patch(name string, sasportaldevice *SasPortalDevice) *DeploymentsDevicesPatchCall {
	c := &DeploymentsDevicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportaldevice = sasportaldevice
	return c
}

// UpdateMask sets the optional parameter "updateMask": Fields to be
// updated.
func (c *DeploymentsDevicesPatchCall) UpdateMask(updateMask string) *DeploymentsDevicesPatchCall { c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DeploymentsDevicesPatchCall) Fields(s ...googleapi.Field) *DeploymentsDevicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *DeploymentsDevicesPatchCall) Context(ctx context.Context) *DeploymentsDevicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *DeploymentsDevicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *DeploymentsDevicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldevice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.deployments.devices.patch" call. // Exactly one of *SasPortalDevice or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *SasPortalDevice.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *DeploymentsDevicesPatchCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &SasPortalDevice{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Updates a device.", // "flatPath": "v1alpha1/deployments/{deploymentsId}/devices/{devicesId}", // "httpMethod": "PATCH", // "id": "sasportal.deployments.devices.patch", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Output only. 
The resource path name.", // "location": "path", // "pattern": "^deployments/[^/]+/devices/[^/]+$", // "required": true, // "type": "string" // }, // "updateMask": { // "description": "Fields to be updated.", // "format": "google-fieldmask", // "location": "query", // "type": "string" // } // }, // "path": "v1alpha1/{+name}", // "request": { // "$ref": "SasPortalDevice" // }, // "response": { // "$ref": "SasPortalDevice" // }, // "scopes": [ // "https://www.googleapis.com/auth/userinfo.email" // ] // } } // method id "sasportal.deployments.devices.signDevice": type DeploymentsDevicesSignDeviceCall struct { s *Service name string sasportalsigndevicerequest *SasPortalSignDeviceRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // SignDevice: Signs a device. // // - name: Output only. The resource path name. func (r *DeploymentsDevicesService) SignDevice(name string, sasportalsigndevicerequest *SasPortalSignDeviceRequest) *DeploymentsDevicesSignDeviceCall { c := &DeploymentsDevicesSignDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.sasportalsigndevicerequest = sasportalsigndevicerequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *DeploymentsDevicesSignDeviceCall) Fields(s ...googleapi.Field) *DeploymentsDevicesSignDeviceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *DeploymentsDevicesSignDeviceCall) Context(ctx context.Context) *DeploymentsDevicesSignDeviceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *DeploymentsDevicesSignDeviceCall) Header() http.Header {
	// Lazily allocated so a zero-value call still yields a usable map.
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest marshals the sign request and sends a POST to
// "v1alpha1/{+name}:signDevice". Generated code — do not hand-edit.
func (c *DeploymentsDevicesSignDeviceCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalsigndevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:signDevice")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.deployments.devices.signDevice" call.
// Exactly one of *SasPortalEmpty or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalEmpty.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DeploymentsDevicesSignDeviceCall) Do(opts ...googleapi.CallOption) (*SasPortalEmpty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 has no decodable body; report via *googleapi.Error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalEmpty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Signs a device.",
	//   "flatPath": "v1alpha1/deployments/{deploymentsId}/devices/{devicesId}:signDevice",
	//   "httpMethod": "POST",
	//   "id": "sasportal.deployments.devices.signDevice",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Output only. The resource path name.",
	//       "location": "path",
	//       "pattern": "^deployments/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:signDevice",
	//   "request": {
	//     "$ref": "SasPortalSignDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalEmpty"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.deployments.devices.updateSigned":

// DeploymentsDevicesUpdateSignedCall is the fluent request builder for
// the "updateSigned" method.
type DeploymentsDevicesUpdateSignedCall struct {
	s                                  *Service
	name                               string
	sasportalupdatesigneddevicerequest *SasPortalUpdateSignedDeviceRequest
	urlParams_                         gensupport.URLParams
	ctx_                               context.Context
	header_                            http.Header
}

// UpdateSigned: Updates a signed device.
//
// - name: The name of the device to update.
func (r *DeploymentsDevicesService) UpdateSigned(name string, sasportalupdatesigneddevicerequest *SasPortalUpdateSignedDeviceRequest) *DeploymentsDevicesUpdateSignedCall {
	c := &DeploymentsDevicesUpdateSignedCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalupdatesigneddevicerequest = sasportalupdatesigneddevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *DeploymentsDevicesUpdateSignedCall) Fields(s ...googleapi.Field) *DeploymentsDevicesUpdateSignedCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *DeploymentsDevicesUpdateSignedCall) Context(ctx context.Context) *DeploymentsDevicesUpdateSignedCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *DeploymentsDevicesUpdateSignedCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest marshals the update request and sends a PATCH to
// "v1alpha1/{+name}:updateSigned".
func (c *DeploymentsDevicesUpdateSignedCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalupdatesigneddevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:updateSigned")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.deployments.devices.updateSigned" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *DeploymentsDevicesUpdateSignedCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates a signed device.",
	//   "flatPath": "v1alpha1/deployments/{deploymentsId}/devices/{devicesId}:updateSigned",
	//   "httpMethod": "PATCH",
	//   "id": "sasportal.deployments.devices.updateSigned",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device to update.",
	//       "location": "path",
	//       "pattern": "^deployments/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:updateSigned",
	//   "request": {
	//     "$ref": "SasPortalUpdateSignedDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.installer.generateSecret":

// InstallerGenerateSecretCall is the fluent request builder for the
// "generateSecret" method. Note: no path parameter; the request is a
// top-level installer call.
type InstallerGenerateSecretCall struct {
	s                              *Service
	sasportalgeneratesecretrequest *SasPortalGenerateSecretRequest
	urlParams_                     gensupport.URLParams
	ctx_                           context.Context
	header_                        http.Header
}

// GenerateSecret: Generates a secret to be used with the
// ValidateInstaller.
func (r *InstallerService) GenerateSecret(sasportalgeneratesecretrequest *SasPortalGenerateSecretRequest) *InstallerGenerateSecretCall {
	c := &InstallerGenerateSecretCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.sasportalgeneratesecretrequest = sasportalgeneratesecretrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *InstallerGenerateSecretCall) Fields(s ...googleapi.Field) *InstallerGenerateSecretCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *InstallerGenerateSecretCall) Context(ctx context.Context) *InstallerGenerateSecretCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *InstallerGenerateSecretCall) Header() http.Header {
	// Lazily allocated so a zero-value call still yields a usable map.
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest marshals the request and sends a POST to
// "v1alpha1/installer:generateSecret". No googleapi.Expand here: the
// path has no {+...} parameters. Generated code — do not hand-edit.
func (c *InstallerGenerateSecretCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalgeneratesecretrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/installer:generateSecret")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.installer.generateSecret" call.
// Exactly one of *SasPortalGenerateSecretResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalGenerateSecretResponse.ServerResponse.Header or (if
// a response was returned at all) in error.(*googleapi.Error).Header.
// Use googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *InstallerGenerateSecretCall) Do(opts ...googleapi.CallOption) (*SasPortalGenerateSecretResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 has no decodable body; report via *googleapi.Error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalGenerateSecretResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Generates a secret to be used with the ValidateInstaller.",
	//   "flatPath": "v1alpha1/installer:generateSecret",
	//   "httpMethod": "POST",
	//   "id": "sasportal.installer.generateSecret",
	//   "parameterOrder": [],
	//   "parameters": {},
	//   "path": "v1alpha1/installer:generateSecret",
	//   "request": {
	//     "$ref": "SasPortalGenerateSecretRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalGenerateSecretResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.installer.validate":

// InstallerValidateCall is the fluent request builder for the
// "validate" method.
type InstallerValidateCall struct {
	s                                 *Service
	sasportalvalidateinstallerrequest *SasPortalValidateInstallerRequest
	urlParams_                        gensupport.URLParams
	ctx_                              context.Context
	header_                           http.Header
}

// Validate: Validates the identity of a Certified Professional
// Installer (CPI).
func (r *InstallerService) Validate(sasportalvalidateinstallerrequest *SasPortalValidateInstallerRequest) *InstallerValidateCall {
	c := &InstallerValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.sasportalvalidateinstallerrequest = sasportalvalidateinstallerrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *InstallerValidateCall) Fields(s ...googleapi.Field) *InstallerValidateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *InstallerValidateCall) Context(ctx context.Context) *InstallerValidateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *InstallerValidateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest marshals the request and sends a POST to
// "v1alpha1/installer:validate" (no path parameters).
func (c *InstallerValidateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalvalidateinstallerrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/installer:validate")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.installer.validate" call.
// Exactly one of *SasPortalValidateInstallerResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalValidateInstallerResponse.ServerResponse.Header or
// (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *InstallerValidateCall) Do(opts ...googleapi.CallOption) (*SasPortalValidateInstallerResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalValidateInstallerResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Validates the identity of a Certified Professional Installer (CPI).",
	//   "flatPath": "v1alpha1/installer:validate",
	//   "httpMethod": "POST",
	//   "id": "sasportal.installer.validate",
	//   "parameterOrder": [],
	//   "parameters": {},
	//   "path": "v1alpha1/installer:validate",
	//   "request": {
	//     "$ref": "SasPortalValidateInstallerRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalValidateInstallerResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.get":

// NodesGetCall is the fluent request builder for the "nodes.get"
// method; supports ETag-conditional fetches via IfNoneMatch.
type NodesGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Returns a requested node.
//
// - name: The name of the node.
func (r *NodesService) Get(name string) *NodesGetCall {
	c := &NodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesGetCall) Fields(s ...googleapi.Field) *NodesGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of In-None-Match.
func (c *NodesGetCall) IfNoneMatch(entityTag string) *NodesGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesGetCall) Context(ctx context.Context) *NodesGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesGetCall) Header() http.Header {
	// Lazily allocated so a zero-value call still yields a usable map.
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest sends a GET to "v1alpha1/{+name}", attaching If-None-Match
// when an ETag was supplied. Generated code — do not hand-edit.
func (c *NodesGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.get" call.
// Exactly one of *SasPortalNode or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalNode.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesGetCall) Do(opts ...googleapi.CallOption) (*SasPortalNode, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 has no decodable body; report via *googleapi.Error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalNode{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns a requested node.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}",
	//   "httpMethod": "GET",
	//   "id": "sasportal.nodes.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the node.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.deployments.delete":

// NodesDeploymentsDeleteCall is the fluent request builder for the
// "nodes.deployments.delete" method.
type NodesDeploymentsDeleteCall struct {
	s          *Service
	name       string
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}

// Delete: Deletes a deployment.
//
// - name: The name of the deployment.
func (r *NodesDeploymentsService) Delete(name string) *NodesDeploymentsDeleteCall {
	c := &NodesDeploymentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDeploymentsDeleteCall) Fields(s ...googleapi.Field) *NodesDeploymentsDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDeploymentsDeleteCall) Context(ctx context.Context) *NodesDeploymentsDeleteCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDeploymentsDeleteCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest sends a DELETE to "v1alpha1/{+name}" (no request body).
func (c *NodesDeploymentsDeleteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("DELETE", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.deployments.delete" call.
// Exactly one of *SasPortalEmpty or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalEmpty.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDeploymentsDeleteCall) Do(opts ...googleapi.CallOption) (*SasPortalEmpty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalEmpty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Deletes a deployment.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/deployments/{deploymentsId}",
	//   "httpMethod": "DELETE",
	//   "id": "sasportal.nodes.deployments.delete",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the deployment.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalEmpty"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.deployments.get":

// NodesDeploymentsGetCall is the fluent request builder for the
// "nodes.deployments.get" method; supports ETag-conditional fetches.
type NodesDeploymentsGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Returns a requested deployment.
//
// - name: The name of the deployment.
func (r *NodesDeploymentsService) Get(name string) *NodesDeploymentsGetCall { c := &NodesDeploymentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *NodesDeploymentsGetCall) Fields(s ...googleapi.Field) *NodesDeploymentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *NodesDeploymentsGetCall) IfNoneMatch(entityTag string) *NodesDeploymentsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *NodesDeploymentsGetCall) Context(ctx context.Context) *NodesDeploymentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *NodesDeploymentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *NodesDeploymentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.nodes.deployments.get" call. // Exactly one of *SasPortalDeployment or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *SasPortalDeployment.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *NodesDeploymentsGetCall) Do(opts ...googleapi.CallOption) (*SasPortalDeployment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &SasPortalDeployment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Returns a requested deployment.", // "flatPath": "v1alpha1/nodes/{nodesId}/deployments/{deploymentsId}", // "httpMethod": "GET", // "id": "sasportal.nodes.deployments.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Required. 
The name of the deployment.", // "location": "path", // "pattern": "^nodes/[^/]+/deployments/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1alpha1/{+name}", // "response": { // "$ref": "SasPortalDeployment" // }, // "scopes": [ // "https://www.googleapis.com/auth/userinfo.email" // ] // } } // method id "sasportal.nodes.deployments.list": type NodesDeploymentsListCall struct { s *Service parent string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Lists deployments. // // - parent: The parent resource name, for example, "nodes/1", // customer/1/nodes/2. func (r *NodesDeploymentsService) List(parent string) *NodesDeploymentsListCall { c := &NodesDeploymentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } // Filter sets the optional parameter "filter": The filter expression. // The filter should have the following format: "DIRECT_CHILDREN" or // format: "direct_children". The filter is case insensitive. If empty, // then no deployments are filtered. func (c *NodesDeploymentsListCall) Filter(filter string) *NodesDeploymentsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The maximum number // of deployments to return in the response. func (c *NodesDeploymentsListCall) PageSize(pageSize int64) *NodesDeploymentsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A pagination token // returned from a previous call to ListDeployments that indicates where // this listing should continue from. func (c *NodesDeploymentsListCall) PageToken(pageToken string) *NodesDeploymentsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *NodesDeploymentsListCall) Fields(s ...googleapi.Field) *NodesDeploymentsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *NodesDeploymentsListCall) IfNoneMatch(entityTag string) *NodesDeploymentsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDeploymentsListCall) Context(ctx context.Context) *NodesDeploymentsListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDeploymentsListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesDeploymentsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/deployments")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.deployments.list" call.
// Exactly one of *SasPortalListDeploymentsResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalListDeploymentsResponse.ServerResponse.Header or (if
// a response was returned at all) in error.(*googleapi.Error).Header.
// Use googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDeploymentsListCall) Do(opts ...googleapi.CallOption) (*SasPortalListDeploymentsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can use googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListDeploymentsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists deployments.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/deployments",
	//   "httpMethod": "GET",
	//   "id": "sasportal.nodes.deployments.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have the following format: \"DIRECT_CHILDREN\" or format: \"direct_children\". The filter is case insensitive. If empty, then no deployments are filtered.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of deployments to return in the response.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListDeployments that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The parent resource name, for example, \"nodes/1\", customer/1/nodes/2.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/deployments",
	//   "response": {
	//     "$ref": "SasPortalListDeploymentsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *NodesDeploymentsListCall) Pages(ctx context.Context, f func(*SasPortalListDeploymentsResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.nodes.deployments.move":

type NodesDeploymentsMoveCall struct {
	s                              *Service
	name                           string
	sasportalmovedeploymentrequest *SasPortalMoveDeploymentRequest
	urlParams_                     gensupport.URLParams
	ctx_                           context.Context
	header_                        http.Header
}

// Move: Moves a deployment under another node or customer.
//
// - name: The name of the deployment to move.
func (r *NodesDeploymentsService) Move(name string, sasportalmovedeploymentrequest *SasPortalMoveDeploymentRequest) *NodesDeploymentsMoveCall {
	c := &NodesDeploymentsMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalmovedeploymentrequest = sasportalmovedeploymentrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDeploymentsMoveCall) Fields(s ...googleapi.Field) *NodesDeploymentsMoveCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDeploymentsMoveCall) Context(ctx context.Context) *NodesDeploymentsMoveCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDeploymentsMoveCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesDeploymentsMoveCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// The request message is serialized as the JSON request body.
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalmovedeploymentrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:move")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.deployments.move" call.
// Exactly one of *SasPortalOperation or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalOperation.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDeploymentsMoveCall) Do(opts ...googleapi.CallOption) (*SasPortalOperation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can use googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalOperation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Moves a deployment under another node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/deployments/{deploymentsId}:move",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.deployments.move",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the deployment to move.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:move",
	//   "request": {
	//     "$ref": "SasPortalMoveDeploymentRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalOperation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.deployments.patch":

type NodesDeploymentsPatchCall struct {
	s                   *Service
	name                string
	sasportaldeployment *SasPortalDeployment
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}

// Patch: Updates an existing deployment.
//
// - name: Output only. Resource name.
func (r *NodesDeploymentsService) Patch(name string, sasportaldeployment *SasPortalDeployment) *NodesDeploymentsPatchCall {
	c := &NodesDeploymentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportaldeployment = sasportaldeployment
	return c
}

// UpdateMask sets the optional parameter "updateMask": Fields to be
// updated.
func (c *NodesDeploymentsPatchCall) UpdateMask(updateMask string) *NodesDeploymentsPatchCall {
	c.urlParams_.Set("updateMask", updateMask)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDeploymentsPatchCall) Fields(s ...googleapi.Field) *NodesDeploymentsPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDeploymentsPatchCall) Context(ctx context.Context) *NodesDeploymentsPatchCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDeploymentsPatchCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesDeploymentsPatchCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// The deployment resource is serialized as the JSON request body.
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldeployment)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.deployments.patch" call.
// Exactly one of *SasPortalDeployment or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalDeployment.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDeploymentsPatchCall) Do(opts ...googleapi.CallOption) (*SasPortalDeployment, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can use googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDeployment{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates an existing deployment.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/deployments/{deploymentsId}",
	//   "httpMethod": "PATCH",
	//   "id": "sasportal.nodes.deployments.patch",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Output only. Resource name.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "updateMask": {
	//       "description": "Fields to be updated.",
	//       "format": "google-fieldmask",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "request": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.deployments.devices.create":

type NodesDeploymentsDevicesCreateCall struct {
	s               *Service
	parent          string
	sasportaldevice *SasPortalDevice
	urlParams_      gensupport.URLParams
	ctx_            context.Context
	header_         http.Header
}

// Create: Creates a device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *NodesDeploymentsDevicesService) Create(parent string, sasportaldevice *SasPortalDevice) *NodesDeploymentsDevicesCreateCall {
	c := &NodesDeploymentsDevicesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportaldevice = sasportaldevice
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDeploymentsDevicesCreateCall) Fields(s ...googleapi.Field) *NodesDeploymentsDevicesCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDeploymentsDevicesCreateCall) Context(ctx context.Context) *NodesDeploymentsDevicesCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDeploymentsDevicesCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesDeploymentsDevicesCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// The device resource is serialized as the JSON request body.
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldevice)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.deployments.devices.create" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDeploymentsDevicesCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can use googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a device under a node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/deployments/{deploymentsId}/devices",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.deployments.devices.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "request": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.deployments.devices.createSigned":

type NodesDeploymentsDevicesCreateSignedCall struct {
	s                                  *Service
	parent                             string
	sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest
	urlParams_                         gensupport.URLParams
	ctx_                               context.Context
	header_                            http.Header
}

// CreateSigned: Creates a signed device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *NodesDeploymentsDevicesService) CreateSigned(parent string, sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest) *NodesDeploymentsDevicesCreateSignedCall {
	c := &NodesDeploymentsDevicesCreateSignedCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportalcreatesigneddevicerequest = sasportalcreatesigneddevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDeploymentsDevicesCreateSignedCall) Fields(s ...googleapi.Field) *NodesDeploymentsDevicesCreateSignedCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDeploymentsDevicesCreateSignedCall) Context(ctx context.Context) *NodesDeploymentsDevicesCreateSignedCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDeploymentsDevicesCreateSignedCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesDeploymentsDevicesCreateSignedCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// The request message is serialized as the JSON request body.
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalcreatesigneddevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices:createSigned")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.deployments.devices.createSigned" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDeploymentsDevicesCreateSignedCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can use googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a signed device under a node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/deployments/{deploymentsId}/devices:createSigned",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.deployments.devices.createSigned",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices:createSigned",
	//   "request": {
	//     "$ref": "SasPortalCreateSignedDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.deployments.devices.list":

type NodesDeploymentsDevicesListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists devices under a node or customer.
//
// - parent: The name of the parent resource.
func (r *NodesDeploymentsDevicesService) List(parent string) *NodesDeploymentsDevicesListCall {
	c := &NodesDeploymentsDevicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have one of the following formats: "sn=123454" or
// "display_name=MyDevice". sn corresponds to serial number of the
// device. The filter is case insensitive.
func (c *NodesDeploymentsDevicesListCall) Filter(filter string) *NodesDeploymentsDevicesListCall {
	c.urlParams_.Set("filter", filter)
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of devices to return in the response. If empty or zero, all devices
// will be listed. Must be in the range [0, 1000].
func (c *NodesDeploymentsDevicesListCall) PageSize(pageSize int64) *NodesDeploymentsDevicesListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": A pagination token
// returned from a previous call to ListDevices that indicates where
// this listing should continue from.
func (c *NodesDeploymentsDevicesListCall) PageToken(pageToken string) *NodesDeploymentsDevicesListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDeploymentsDevicesListCall) Fields(s ...googleapi.Field) *NodesDeploymentsDevicesListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *NodesDeploymentsDevicesListCall) IfNoneMatch(entityTag string) *NodesDeploymentsDevicesListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDeploymentsDevicesListCall) Context(ctx context.Context) *NodesDeploymentsDevicesListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDeploymentsDevicesListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesDeploymentsDevicesListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.deployments.devices.list" call.
// Exactly one of *SasPortalListDevicesResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalListDevicesResponse.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDeploymentsDevicesListCall) Do(opts ...googleapi.CallOption) (*SasPortalListDevicesResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can use googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListDevicesResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists devices under a node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/deployments/{deploymentsId}/devices",
	//   "httpMethod": "GET",
	//   "id": "sasportal.nodes.deployments.devices.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have one of the following formats: \"sn=123454\" or \"display_name=MyDevice\". sn corresponds to serial number of the device. The filter is case insensitive.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of devices to return in the response. If empty or zero, all devices will be listed. Must be in the range [0, 1000].",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListDevices that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/deployments/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "response": {
	//     "$ref": "SasPortalListDevicesResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *NodesDeploymentsDevicesListCall) Pages(ctx context.Context, f func(*SasPortalListDevicesResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.nodes.devices.create":

type NodesDevicesCreateCall struct {
	s               *Service
	parent          string
	sasportaldevice *SasPortalDevice
	urlParams_      gensupport.URLParams
	ctx_            context.Context
	header_         http.Header
}

// Create: Creates a device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *NodesDevicesService) Create(parent string, sasportaldevice *SasPortalDevice) *NodesDevicesCreateCall {
	c := &NodesDevicesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportaldevice = sasportaldevice
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDevicesCreateCall) Fields(s ...googleapi.Field) *NodesDevicesCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDevicesCreateCall) Context(ctx context.Context) *NodesDevicesCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDevicesCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP request for this call; alt selects the
// response wire format (Do always passes "json").
func (c *NodesDevicesCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	// Copy caller-supplied headers (from Header()) before the fixed ones below.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldevice)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.devices.create" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDevicesCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified carries no decodable body, so it is surfaced as a
	// *googleapi.Error before the generic error/response checks below.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret // extra level of indirection for gensupport.DecodeResponse
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a device under a node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/devices",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.devices.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "request": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.devices.createSigned":

// NodesDevicesCreateSignedCall is the request builder for the
// "sasportal.nodes.devices.createSigned" method.
type NodesDevicesCreateSignedCall struct {
	s                                  *Service
	parent                             string
	sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest
	urlParams_                         gensupport.URLParams
	ctx_                               context.Context
	header_                            http.Header
}

// CreateSigned: Creates a signed device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *NodesDevicesService) CreateSigned(parent string, sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest) *NodesDevicesCreateSignedCall {
	c := &NodesDevicesCreateSignedCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportalcreatesigneddevicerequest = sasportalcreatesigneddevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDevicesCreateSignedCall) Fields(s ...googleapi.Field) *NodesDevicesCreateSignedCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDevicesCreateSignedCall) Context(ctx context.Context) *NodesDevicesCreateSignedCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDevicesCreateSignedCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP request for this call; alt selects the
// response wire format (Do always passes "json").
func (c *NodesDevicesCreateSignedCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalcreatesigneddevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices:createSigned")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.devices.createSigned" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDevicesCreateSignedCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 responses have no decodable body; surface them as *googleapi.Error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret // extra level of indirection for gensupport.DecodeResponse
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a signed device under a node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/devices:createSigned",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.devices.createSigned",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices:createSigned",
	//   "request": {
	//     "$ref": "SasPortalCreateSignedDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.devices.delete":

// NodesDevicesDeleteCall is the request builder for the
// "sasportal.nodes.devices.delete" method.
type NodesDevicesDeleteCall struct {
	s          *Service
	name       string
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}

// Delete: Deletes a device.
//
// - name: The name of the device.
func (r *NodesDevicesService) Delete(name string) *NodesDevicesDeleteCall {
	c := &NodesDevicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDevicesDeleteCall) Fields(s ...googleapi.Field) *NodesDevicesDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDevicesDeleteCall) Context(ctx context.Context) *NodesDevicesDeleteCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDevicesDeleteCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP request for this call; alt selects the
// response wire format (Do always passes "json"). DELETE carries no body.
func (c *NodesDevicesDeleteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("DELETE", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.devices.delete" call.
// Exactly one of *SasPortalEmpty or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalEmpty.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDevicesDeleteCall) Do(opts ...googleapi.CallOption) (*SasPortalEmpty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 responses have no decodable body; surface them as *googleapi.Error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalEmpty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret // extra level of indirection for gensupport.DecodeResponse
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Deletes a device.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/devices/{devicesId}",
	//   "httpMethod": "DELETE",
	//   "id": "sasportal.nodes.devices.delete",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalEmpty"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.devices.get":

// NodesDevicesGetCall is the request builder for the
// "sasportal.nodes.devices.get" method.
type NodesDevicesGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Gets details about a device.
//
// - name: The name of the device.
func (r *NodesDevicesService) Get(name string) *NodesDevicesGetCall {
	c := &NodesDevicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDevicesGetCall) Fields(s ...googleapi.Field) *NodesDevicesGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of In-None-Match.
func (c *NodesDevicesGetCall) IfNoneMatch(entityTag string) *NodesDevicesGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDevicesGetCall) Context(ctx context.Context) *NodesDevicesGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDevicesGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP request for this call; alt selects the
// response wire format (Do always passes "json").
func (c *NodesDevicesGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	// Conditional GET: only set If-None-Match when the caller supplied an ETag.
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.devices.get" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDevicesGetCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 responses have no decodable body; surface them as *googleapi.Error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret // extra level of indirection for gensupport.DecodeResponse
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets details about a device.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/devices/{devicesId}",
	//   "httpMethod": "GET",
	//   "id": "sasportal.nodes.devices.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.devices.list":

// NodesDevicesListCall is the request builder for the
// "sasportal.nodes.devices.list" method.
type NodesDevicesListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists devices under a node or customer.
//
// - parent: The name of the parent resource.
func (r *NodesDevicesService) List(parent string) *NodesDevicesListCall { c := &NodesDevicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } // Filter sets the optional parameter "filter": The filter expression. // The filter should have one of the following formats: "sn=123454" or // "display_name=MyDevice". sn corresponds to serial number of the // device. The filter is case insensitive. func (c *NodesDevicesListCall) Filter(filter string) *NodesDevicesListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The maximum number // of devices to return in the response. If empty or zero, all devices // will be listed. Must be in the range [0, 1000]. func (c *NodesDevicesListCall) PageSize(pageSize int64) *NodesDevicesListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A pagination token // returned from a previous call to ListDevices that indicates where // this listing should continue from. func (c *NodesDevicesListCall) PageToken(pageToken string) *NodesDevicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *NodesDevicesListCall) Fields(s ...googleapi.Field) *NodesDevicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
func (c *NodesDevicesListCall) IfNoneMatch(entityTag string) *NodesDevicesListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *NodesDevicesListCall) Context(ctx context.Context) *NodesDevicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *NodesDevicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *NodesDevicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "sasportal.nodes.devices.list" call. // Exactly one of *SasPortalListDevicesResponse or error will be // non-nil. Any non-2xx status code is an error. Response headers are in // either *SasPortalListDevicesResponse.ServerResponse.Header or (if a // response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
func (c *NodesDevicesListCall) Do(opts ...googleapi.CallOption) (*SasPortalListDevicesResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 responses have no decodable body; surface them as *googleapi.Error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListDevicesResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret // extra level of indirection for gensupport.DecodeResponse
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists devices under a node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/devices",
	//   "httpMethod": "GET",
	//   "id": "sasportal.nodes.devices.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have one of the following formats: \"sn=123454\" or \"display_name=MyDevice\". sn corresponds to serial number of the device. The filter is case insensitive.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of devices to return in the response. If empty or zero, all devices will be listed. Must be in the range [0, 1000].",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListDevices that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "response": {
	//     "$ref": "SasPortalListDevicesResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *NodesDevicesListCall) Pages(ctx context.Context, f func(*SasPortalListDevicesResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	// Each iteration issues one Do call; iteration stops on the first empty
	// NextPageToken or the first error (from Do or from f).
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.nodes.devices.move":

// NodesDevicesMoveCall is the request builder for the
// "sasportal.nodes.devices.move" method.
type NodesDevicesMoveCall struct {
	s                          *Service
	name                       string
	sasportalmovedevicerequest *SasPortalMoveDeviceRequest
	urlParams_                 gensupport.URLParams
	ctx_                       context.Context
	header_                    http.Header
}

// Move: Moves a device under another node or customer.
//
// - name: The name of the device to move.
func (r *NodesDevicesService) Move(name string, sasportalmovedevicerequest *SasPortalMoveDeviceRequest) *NodesDevicesMoveCall {
	c := &NodesDevicesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalmovedevicerequest = sasportalmovedevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDevicesMoveCall) Fields(s ...googleapi.Field) *NodesDevicesMoveCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDevicesMoveCall) Context(ctx context.Context) *NodesDevicesMoveCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDevicesMoveCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP request for this call; alt selects the
// response wire format (Do always passes "json").
func (c *NodesDevicesMoveCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalmovedevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:move")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.devices.move" call.
// Exactly one of *SasPortalOperation or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalOperation.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDevicesMoveCall) Do(opts ...googleapi.CallOption) (*SasPortalOperation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 responses have no decodable body; surface them as *googleapi.Error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalOperation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret // extra level of indirection for gensupport.DecodeResponse
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Moves a device under another node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/devices/{devicesId}:move",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.devices.move",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device to move.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:move",
	//   "request": {
	//     "$ref": "SasPortalMoveDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalOperation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.devices.patch":

// NodesDevicesPatchCall is the request builder for the
// "sasportal.nodes.devices.patch" method.
type NodesDevicesPatchCall struct {
	s               *Service
	name            string
	sasportaldevice *SasPortalDevice
	urlParams_      gensupport.URLParams
	ctx_            context.Context
	header_         http.Header
}

// Patch: Updates a device.
//
// - name: Output only. The resource path name.
func (r *NodesDevicesService) Patch(name string, sasportaldevice *SasPortalDevice) *NodesDevicesPatchCall {
	c := &NodesDevicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportaldevice = sasportaldevice
	return c
}

// UpdateMask sets the optional parameter "updateMask": Fields to be
// updated.
func (c *NodesDevicesPatchCall) UpdateMask(updateMask string) *NodesDevicesPatchCall {
	c.urlParams_.Set("updateMask", updateMask)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDevicesPatchCall) Fields(s ...googleapi.Field) *NodesDevicesPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDevicesPatchCall) Context(ctx context.Context) *NodesDevicesPatchCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDevicesPatchCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP request for this call; alt selects the
// response wire format (Do always passes "json").
func (c *NodesDevicesPatchCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldevice)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.devices.patch" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDevicesPatchCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 responses have no decodable body; surface them as *googleapi.Error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret // extra level of indirection for gensupport.DecodeResponse
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates a device.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/devices/{devicesId}",
	//   "httpMethod": "PATCH",
	//   "id": "sasportal.nodes.devices.patch",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Output only. The resource path name.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "updateMask": {
	//       "description": "Fields to be updated.",
	//       "format": "google-fieldmask",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "request": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.devices.signDevice":

// NodesDevicesSignDeviceCall is the request builder for the
// "sasportal.nodes.devices.signDevice" method.
type NodesDevicesSignDeviceCall struct {
	s                          *Service
	name                       string
	sasportalsigndevicerequest *SasPortalSignDeviceRequest
	urlParams_                 gensupport.URLParams
	ctx_                       context.Context
	header_                    http.Header
}

// SignDevice: Signs a device.
//
// - name: Output only. The resource path name.
func (r *NodesDevicesService) SignDevice(name string, sasportalsigndevicerequest *SasPortalSignDeviceRequest) *NodesDevicesSignDeviceCall {
	c := &NodesDevicesSignDeviceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalsigndevicerequest = sasportalsigndevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDevicesSignDeviceCall) Fields(s ...googleapi.Field) *NodesDevicesSignDeviceCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDevicesSignDeviceCall) Context(ctx context.Context) *NodesDevicesSignDeviceCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDevicesSignDeviceCall) Header() http.Header {
	// Lazily allocated so a nil map is never returned to the caller.
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the HTTP request for this call; alt selects
// the response encoding ("json" here).
func (c *NodesDevicesSignDeviceCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	// Caller-supplied headers (from Header()) are copied in before the
	// mandatory headers below, so the latter win on conflict.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalsigndevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:signDevice")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.devices.signDevice" call.
// Exactly one of *SasPortalEmpty or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalEmpty.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDevicesSignDeviceCall) Do(opts ...googleapi.CallOption) (*SasPortalEmpty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalEmpty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Signs a device.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/devices/{devicesId}:signDevice",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.devices.signDevice",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Output only. The resource path name.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:signDevice",
	//   "request": {
	//     "$ref": "SasPortalSignDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalEmpty"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.devices.updateSigned":

// NodesDevicesUpdateSignedCall holds the pending state for a
// sasportal.nodes.devices.updateSigned RPC until Do is invoked.
type NodesDevicesUpdateSignedCall struct {
	s                                  *Service
	name                               string
	sasportalupdatesigneddevicerequest *SasPortalUpdateSignedDeviceRequest
	urlParams_                         gensupport.URLParams
	ctx_                               context.Context
	header_                            http.Header
}

// UpdateSigned: Updates a signed device.
//
// - name: The name of the device to update.
func (r *NodesDevicesService) UpdateSigned(name string, sasportalupdatesigneddevicerequest *SasPortalUpdateSignedDeviceRequest) *NodesDevicesUpdateSignedCall {
	c := &NodesDevicesUpdateSignedCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalupdatesigneddevicerequest = sasportalupdatesigneddevicerequest
	return c
}

// Fields allows partial responses to be retrieved.
// See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesDevicesUpdateSignedCall) Fields(s ...googleapi.Field) *NodesDevicesUpdateSignedCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesDevicesUpdateSignedCall) Context(ctx context.Context) *NodesDevicesUpdateSignedCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesDevicesUpdateSignedCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the PATCH request carrying the signed-device
// payload; alt selects the response encoding.
func (c *NodesDevicesUpdateSignedCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalupdatesigneddevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:updateSigned")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.devices.updateSigned" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesDevicesUpdateSignedCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates a signed device.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/devices/{devicesId}:updateSigned",
	//   "httpMethod": "PATCH",
	//   "id": "sasportal.nodes.devices.updateSigned",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the device to update.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/devices/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:updateSigned",
	//   "request": {
	//     "$ref": "SasPortalUpdateSignedDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.nodes.create":

// NodesNodesCreateCall holds the pending state for a
// sasportal.nodes.nodes.create RPC until Do is invoked.
type NodesNodesCreateCall struct {
	s             *Service
	parent        string
	sasportalnode *SasPortalNode
	urlParams_    gensupport.URLParams
	ctx_          context.Context
	header_       http.Header
}

// Create: Creates a new node.
//
// - parent: The parent resource name where the node is to be created.
func (r *NodesNodesService) Create(parent string, sasportalnode *SasPortalNode) *NodesNodesCreateCall {
	c := &NodesNodesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportalnode = sasportalnode
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesCreateCall) Fields(s ...googleapi.Field) *NodesNodesCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesCreateCall) Context(ctx context.Context) *NodesNodesCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the POST request carrying the node payload;
// alt selects the response encoding.
func (c *NodesNodesCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalnode)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/nodes")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.create" call.
// Exactly one of *SasPortalNode or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalNode.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalNode, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalNode{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new node.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.nodes.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The parent resource name where the node is to be created.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/nodes",
	//   "request": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "response": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.nodes.delete":

// NodesNodesDeleteCall holds the pending state for a
// sasportal.nodes.nodes.delete RPC until Do is invoked.
type NodesNodesDeleteCall struct {
	s          *Service
	name       string
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}

// Delete: Deletes a node.
//
// - name: The name of the node.
func (r *NodesNodesService) Delete(name string) *NodesNodesDeleteCall {
	c := &NodesNodesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesDeleteCall) Fields(s ...googleapi.Field) *NodesNodesDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesDeleteCall) Context(ctx context.Context) *NodesNodesDeleteCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesDeleteCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the DELETE request (no request body);
// alt selects the response encoding.
func (c *NodesNodesDeleteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("DELETE", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.delete" call.
// Exactly one of *SasPortalEmpty or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalEmpty.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesDeleteCall) Do(opts ...googleapi.CallOption) (*SasPortalEmpty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalEmpty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Deletes a node.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}",
	//   "httpMethod": "DELETE",
	//   "id": "sasportal.nodes.nodes.delete",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the node.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalEmpty"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.nodes.get":

// NodesNodesGetCall holds the pending state for a
// sasportal.nodes.nodes.get RPC until Do is invoked.
type NodesNodesGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Returns a requested node.
//
// - name: The name of the node.
func (r *NodesNodesService) Get(name string) *NodesNodesGetCall {
	c := &NodesNodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesGetCall) Fields(s ...googleapi.Field) *NodesNodesGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value.
// This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *NodesNodesGetCall) IfNoneMatch(entityTag string) *NodesNodesGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesGetCall) Context(ctx context.Context) *NodesNodesGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the GET request, attaching the conditional
// If-None-Match header when an entity tag was supplied.
func (c *NodesNodesGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.get" call.
// Exactly one of *SasPortalNode or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalNode.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesGetCall) Do(opts ...googleapi.CallOption) (*SasPortalNode, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalNode{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns a requested node.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}",
	//   "httpMethod": "GET",
	//   "id": "sasportal.nodes.nodes.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the node.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "response": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.nodes.list":

// NodesNodesListCall holds the pending state for a
// sasportal.nodes.nodes.list RPC until Do is invoked.
type NodesNodesListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists nodes.
//
// - parent: The parent resource name, for example, "nodes/1".
func (r *NodesNodesService) List(parent string) *NodesNodesListCall {
	c := &NodesNodesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have the following format: "DIRECT_CHILDREN" or
// format: "direct_children". The filter is case insensitive. If empty,
// then no nodes are filtered.
func (c *NodesNodesListCall) Filter(filter string) *NodesNodesListCall {
	c.urlParams_.Set("filter", filter)
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of nodes to return in the response.
func (c *NodesNodesListCall) PageSize(pageSize int64) *NodesNodesListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": A pagination token
// returned from a previous call to ListNodes that indicates where this
// listing should continue from.
func (c *NodesNodesListCall) PageToken(pageToken string) *NodesNodesListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesListCall) Fields(s ...googleapi.Field) *NodesNodesListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *NodesNodesListCall) IfNoneMatch(entityTag string) *NodesNodesListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesListCall) Context(ctx context.Context) *NodesNodesListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the GET request for the list call, attaching
// the conditional If-None-Match header when an entity tag was supplied.
func (c *NodesNodesListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/nodes")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.list" call.
// Exactly one of *SasPortalListNodesResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *SasPortalListNodesResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesListCall) Do(opts ...googleapi.CallOption) (*SasPortalListNodesResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListNodesResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists nodes.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes",
	//   "httpMethod": "GET",
	//   "id": "sasportal.nodes.nodes.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have the following format: \"DIRECT_CHILDREN\" or format: \"direct_children\". The filter is case insensitive. If empty, then no nodes are filtered.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of nodes to return in the response.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListNodes that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The parent resource name, for example, \"nodes/1\".",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/nodes",
	//   "response": {
	//     "$ref": "SasPortalListNodesResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *NodesNodesListCall) Pages(ctx context.Context, f func(*SasPortalListNodesResponse) error) error {
	c.ctx_ = ctx
	// The deferred call restores whatever pageToken was set before Pages
	// started, so the call object can be reused from its original position.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.nodes.nodes.move":

// NodesNodesMoveCall holds the pending state for a
// sasportal.nodes.nodes.move RPC until Do is invoked.
type NodesNodesMoveCall struct {
	s                        *Service
	name                     string
	sasportalmovenoderequest *SasPortalMoveNodeRequest
	urlParams_               gensupport.URLParams
	ctx_                     context.Context
	header_                  http.Header
}

// Move: Moves a node under another node or customer.
//
// - name: The name of the node to move.
func (r *NodesNodesService) Move(name string, sasportalmovenoderequest *SasPortalMoveNodeRequest) *NodesNodesMoveCall {
	c := &NodesNodesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalmovenoderequest = sasportalmovenoderequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesMoveCall) Fields(s ...googleapi.Field) *NodesNodesMoveCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesMoveCall) Context(ctx context.Context) *NodesNodesMoveCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesMoveCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the POST request carrying the move-node
// payload; alt selects the response encoding.
func (c *NodesNodesMoveCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalmovenoderequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}:move")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.move" call.
// Exactly one of *SasPortalOperation or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalOperation.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesMoveCall) Do(opts ...googleapi.CallOption) (*SasPortalOperation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalOperation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Moves a node under another node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}:move",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.nodes.move",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The name of the node to move.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}:move",
	//   "request": {
	//     "$ref": "SasPortalMoveNodeRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalOperation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.nodes.patch":

// NodesNodesPatchCall holds the pending state for a
// sasportal.nodes.nodes.patch RPC until Do is invoked.
type NodesNodesPatchCall struct {
	s             *Service
	name          string
	sasportalnode *SasPortalNode
	urlParams_    gensupport.URLParams
	ctx_          context.Context
	header_       http.Header
}

// Patch: Updates an existing node.
//
// - name: Output only. Resource name.
func (r *NodesNodesService) Patch(name string, sasportalnode *SasPortalNode) *NodesNodesPatchCall {
	c := &NodesNodesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.sasportalnode = sasportalnode
	return c
}

// UpdateMask sets the optional parameter "updateMask": Fields to be
// updated.
func (c *NodesNodesPatchCall) UpdateMask(updateMask string) *NodesNodesPatchCall {
	c.urlParams_.Set("updateMask", updateMask)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesPatchCall) Fields(s ...googleapi.Field) *NodesNodesPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesPatchCall) Context(ctx context.Context) *NodesNodesPatchCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesPatchCall) Header() http.Header {
	// Lazily allocated so a zero-value call struct is usable.
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesNodesPatchCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	// Caller headers are copied first; mandatory headers below win.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalnode)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.patch" call.
// Exactly one of *SasPortalNode or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalNode.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesPatchCall) Do(opts ...googleapi.CallOption) (*SasPortalNode, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 is reported as *googleapi.Error (checked before err) so callers
	// can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalNode{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Updates an existing node.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}",
	//   "httpMethod": "PATCH",
	//   "id": "sasportal.nodes.nodes.patch",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Output only. Resource name.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "updateMask": {
	//       "description": "Fields to be updated.",
	//       "format": "google-fieldmask",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+name}",
	//   "request": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "response": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.nodes.deployments.create":

// NodesNodesDeploymentsCreateCall holds the state for a pending
// sasportal.nodes.nodes.deployments.create request.
type NodesNodesDeploymentsCreateCall struct {
	s                   *Service
	parent              string
	sasportaldeployment *SasPortalDeployment
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}

// Create: Creates a new deployment.
//
// - parent: The parent resource name where the deployment is to be
//   created.
func (r *NodesNodesDeploymentsService) Create(parent string, sasportaldeployment *SasPortalDeployment) *NodesNodesDeploymentsCreateCall {
	c := &NodesNodesDeploymentsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportaldeployment = sasportaldeployment
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesDeploymentsCreateCall) Fields(s ...googleapi.Field) *NodesNodesDeploymentsCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesDeploymentsCreateCall) Context(ctx context.Context) *NodesNodesDeploymentsCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesDeploymentsCreateCall) Header() http.Header {
	// Lazily allocated so a zero-value call struct is usable.
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesNodesDeploymentsCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	// Caller headers are copied first; mandatory headers below win.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldeployment)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/deployments")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.deployments.create" call.
// Exactly one of *SasPortalDeployment or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *SasPortalDeployment.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesDeploymentsCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalDeployment, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 is reported as *googleapi.Error (checked before err) so callers
	// can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDeployment{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new deployment.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}/deployments",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.nodes.deployments.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The parent resource name where the deployment is to be created.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/deployments",
	//   "request": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDeployment"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.nodes.deployments.list":

// NodesNodesDeploymentsListCall holds the state for a pending
// sasportal.nodes.nodes.deployments.list request. ifNoneMatch_ carries
// an ETag for conditional GETs (see IfNoneMatch).
type NodesNodesDeploymentsListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists deployments.
//
// - parent: The parent resource name, for example, "nodes/1",
//   customer/1/nodes/2.
func (r *NodesNodesDeploymentsService) List(parent string) *NodesNodesDeploymentsListCall {
	c := &NodesNodesDeploymentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have the following format: "DIRECT_CHILDREN" or // format: "direct_children". The filter is case insensitive. If empty, // then no deployments are filtered. func (c *NodesNodesDeploymentsListCall) Filter(filter string) *NodesNodesDeploymentsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The maximum number // of deployments to return in the response. func (c *NodesNodesDeploymentsListCall) PageSize(pageSize int64) *NodesNodesDeploymentsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A pagination token // returned from a previous call to ListDeployments that indicates where // this listing should continue from. func (c *NodesNodesDeploymentsListCall) PageToken(pageToken string) *NodesNodesDeploymentsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *NodesNodesDeploymentsListCall) Fields(s ...googleapi.Field) *NodesNodesDeploymentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *NodesNodesDeploymentsListCall) IfNoneMatch(entityTag string) *NodesNodesDeploymentsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
func (c *NodesNodesDeploymentsListCall) Context(ctx context.Context) *NodesNodesDeploymentsListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesDeploymentsListCall) Header() http.Header {
	// Lazily allocated so a zero-value call struct is usable.
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesNodesDeploymentsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	// Caller headers are copied first; mandatory headers below win.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	// Conditional GET: only fetch if the resource's ETag changed.
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/deployments")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.deployments.list" call.
// Exactly one of *SasPortalListDeploymentsResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalListDeploymentsResponse.ServerResponse.Header or (if
// a response was returned at all) in error.(*googleapi.Error).Header.
// Use googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesDeploymentsListCall) Do(opts ...googleapi.CallOption) (*SasPortalListDeploymentsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 is reported as *googleapi.Error (checked before err) so callers
	// can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListDeploymentsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists deployments.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}/deployments",
	//   "httpMethod": "GET",
	//   "id": "sasportal.nodes.nodes.deployments.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have the following format: \"DIRECT_CHILDREN\" or format: \"direct_children\". The filter is case insensitive. If empty, then no deployments are filtered.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of deployments to return in the response.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListDeployments that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The parent resource name, for example, \"nodes/1\", customer/1/nodes/2.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/deployments",
	//   "response": {
	//     "$ref": "SasPortalListDeploymentsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *NodesNodesDeploymentsListCall) Pages(ctx context.Context, f func(*SasPortalListDeploymentsResponse) error) error {
	c.ctx_ = ctx
	// defer evaluates the argument now, so the ORIGINAL token is restored
	// when Pages returns, leaving the call reusable.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.nodes.nodes.devices.create":

// NodesNodesDevicesCreateCall holds the state for a pending
// sasportal.nodes.nodes.devices.create request.
type NodesNodesDevicesCreateCall struct {
	s               *Service
	parent          string
	sasportaldevice *SasPortalDevice
	urlParams_      gensupport.URLParams
	ctx_            context.Context
	header_         http.Header
}

// Create: Creates a device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *NodesNodesDevicesService) Create(parent string, sasportaldevice *SasPortalDevice) *NodesNodesDevicesCreateCall {
	c := &NodesNodesDevicesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportaldevice = sasportaldevice
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesDevicesCreateCall) Fields(s ...googleapi.Field) *NodesNodesDevicesCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesDevicesCreateCall) Context(ctx context.Context) *NodesNodesDevicesCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesDevicesCreateCall) Header() http.Header {
	// Lazily allocated so a zero-value call struct is usable.
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesNodesDevicesCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	// Caller headers are copied first; mandatory headers below win.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaldevice)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.devices.create" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesDevicesCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 is reported as *googleapi.Error (checked before err) so callers
	// can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a device under a node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}/devices",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.nodes.devices.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "request": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.nodes.devices.createSigned":

// NodesNodesDevicesCreateSignedCall holds the state for a pending
// sasportal.nodes.nodes.devices.createSigned request.
type NodesNodesDevicesCreateSignedCall struct {
	s                                  *Service
	parent                             string
	sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest
	urlParams_                         gensupport.URLParams
	ctx_                               context.Context
	header_                            http.Header
}

// CreateSigned: Creates a signed device under a node or customer.
//
// - parent: The name of the parent resource.
func (r *NodesNodesDevicesService) CreateSigned(parent string, sasportalcreatesigneddevicerequest *SasPortalCreateSignedDeviceRequest) *NodesNodesDevicesCreateSignedCall {
	c := &NodesNodesDevicesCreateSignedCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportalcreatesigneddevicerequest = sasportalcreatesigneddevicerequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesDevicesCreateSignedCall) Fields(s ...googleapi.Field) *NodesNodesDevicesCreateSignedCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesDevicesCreateSignedCall) Context(ctx context.Context) *NodesNodesDevicesCreateSignedCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesDevicesCreateSignedCall) Header() http.Header {
	// Lazily allocated so a zero-value call struct is usable.
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesNodesDevicesCreateSignedCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	// Caller headers are copied first; mandatory headers below win.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalcreatesigneddevicerequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices:createSigned")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.devices.createSigned" call.
// Exactly one of *SasPortalDevice or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalDevice.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesDevicesCreateSignedCall) Do(opts ...googleapi.CallOption) (*SasPortalDevice, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 is reported as *googleapi.Error (checked before err) so callers
	// can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalDevice{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a signed device under a node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}/devices:createSigned",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.nodes.devices.createSigned",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices:createSigned",
	//   "request": {
	//     "$ref": "SasPortalCreateSignedDeviceRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalDevice"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// method id "sasportal.nodes.nodes.devices.list":

// NodesNodesDevicesListCall holds the state for a pending
// sasportal.nodes.nodes.devices.list request. ifNoneMatch_ carries an
// ETag for conditional GETs (see IfNoneMatch).
type NodesNodesDevicesListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists devices under a node or customer.
//
// - parent: The name of the parent resource.
func (r *NodesNodesDevicesService) List(parent string) *NodesNodesDevicesListCall {
	c := &NodesNodesDevicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have one of the following formats: "sn=123454" or
// "display_name=MyDevice". sn corresponds to serial number of the
// device. The filter is case insensitive.
func (c *NodesNodesDevicesListCall) Filter(filter string) *NodesNodesDevicesListCall {
	c.urlParams_.Set("filter", filter)
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of devices to return in the response. If empty or zero, all devices
// will be listed. Must be in the range [0, 1000].
func (c *NodesNodesDevicesListCall) PageSize(pageSize int64) *NodesNodesDevicesListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": A pagination token
// returned from a previous call to ListDevices that indicates where
// this listing should continue from.
func (c *NodesNodesDevicesListCall) PageToken(pageToken string) *NodesNodesDevicesListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesDevicesListCall) Fields(s ...googleapi.Field) *NodesNodesDevicesListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of In-None-Match.
func (c *NodesNodesDevicesListCall) IfNoneMatch(entityTag string) *NodesNodesDevicesListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesDevicesListCall) Context(ctx context.Context) *NodesNodesDevicesListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesDevicesListCall) Header() http.Header {
	// Lazily allocated so a zero-value call struct is usable.
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *NodesNodesDevicesListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	// Caller headers are copied first; mandatory headers below win.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	// Conditional GET: only fetch if the resource's ETag changed.
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/devices")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.devices.list" call.
// Exactly one of *SasPortalListDevicesResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalListDevicesResponse.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesDevicesListCall) Do(opts ...googleapi.CallOption) (*SasPortalListDevicesResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// 304 is reported as *googleapi.Error (checked before err) so callers
	// can detect it with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListDevicesResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists devices under a node or customer.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}/devices",
	//   "httpMethod": "GET",
	//   "id": "sasportal.nodes.nodes.devices.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have one of the following formats: \"sn=123454\" or \"display_name=MyDevice\". sn corresponds to serial number of the device. The filter is case insensitive.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of devices to return in the response. If empty or zero, all devices will be listed. Must be in the range [0, 1000].",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListDevices that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The name of the parent resource.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/devices",
	//   "response": {
	//     "$ref": "SasPortalListDevicesResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *NodesNodesDevicesListCall) Pages(ctx context.Context, f func(*SasPortalListDevicesResponse) error) error {
	c.ctx_ = ctx
	// defer evaluates the argument now, so the ORIGINAL token is restored
	// when Pages returns, leaving the call reusable.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.nodes.nodes.nodes.create":

// NodesNodesNodesCreateCall holds the state for a pending
// sasportal.nodes.nodes.nodes.create request.
type NodesNodesNodesCreateCall struct {
	s             *Service
	parent        string
	sasportalnode *SasPortalNode
	urlParams_    gensupport.URLParams
	ctx_          context.Context
	header_       http.Header
}

// Create: Creates a new node.
//
// - parent: The parent resource name where the node is to be created.
// NOTE(review): machine-generated API client code — regenerate rather
// than hand-edit.
func (r *NodesNodesNodesService) Create(parent string, sasportalnode *SasPortalNode) *NodesNodesNodesCreateCall {
	c := &NodesNodesNodesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.sasportalnode = sasportalnode
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesNodesCreateCall) Fields(s ...googleapi.Field) *NodesNodesNodesCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesNodesCreateCall) Context(ctx context.Context) *NodesNodesNodesCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesNodesCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the POST request, serializing the node as
// the JSON body.
func (c *NodesNodesNodesCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalnode)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/nodes")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.nodes.create" call.
// Exactly one of *SasPortalNode or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalNode.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesNodesCreateCall) Do(opts ...googleapi.CallOption) (*SasPortalNode, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalNode{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates a new node.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}/nodes",
	//   "httpMethod": "POST",
	//   "id": "sasportal.nodes.nodes.nodes.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "parent": {
	//       "description": "Required. The parent resource name where the node is to be created.",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/nodes",
	//   "request": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "response": {
	//     "$ref": "SasPortalNode"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.nodes.nodes.nodes.list":

type NodesNodesNodesListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Lists nodes.
//
// - parent: The parent resource name, for example, "nodes/1".
func (r *NodesNodesNodesService) List(parent string) *NodesNodesNodesListCall {
	c := &NodesNodesNodesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// Filter sets the optional parameter "filter": The filter expression.
// The filter should have the following format: "DIRECT_CHILDREN" or
// format: "direct_children". The filter is case insensitive. If empty,
// then no nodes are filtered.
func (c *NodesNodesNodesListCall) Filter(filter string) *NodesNodesNodesListCall {
	c.urlParams_.Set("filter", filter)
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of nodes to return in the response.
func (c *NodesNodesNodesListCall) PageSize(pageSize int64) *NodesNodesNodesListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": A pagination token
// returned from a previous call to ListNodes that indicates where this
// listing should continue from.
func (c *NodesNodesNodesListCall) PageToken(pageToken string) *NodesNodesNodesListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *NodesNodesNodesListCall) Fields(s ...googleapi.Field) *NodesNodesNodesListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *NodesNodesNodesListCall) IfNoneMatch(entityTag string) *NodesNodesNodesListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *NodesNodesNodesListCall) Context(ctx context.Context) *NodesNodesNodesListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *NodesNodesNodesListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and sends the GET request, honouring any
// If-None-Match entity tag set via IfNoneMatch.
func (c *NodesNodesNodesListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/nodes")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.nodes.nodes.nodes.list" call.
// Exactly one of *SasPortalListNodesResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *SasPortalListNodesResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *NodesNodesNodesListCall) Do(opts ...googleapi.CallOption) (*SasPortalListNodesResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalListNodesResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists nodes.",
	//   "flatPath": "v1alpha1/nodes/{nodesId}/nodes/{nodesId1}/nodes",
	//   "httpMethod": "GET",
	//   "id": "sasportal.nodes.nodes.nodes.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "The filter expression. The filter should have the following format: \"DIRECT_CHILDREN\" or format: \"direct_children\". The filter is case insensitive. If empty, then no nodes are filtered.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of nodes to return in the response.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A pagination token returned from a previous call to ListNodes that indicates where this listing should continue from.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The parent resource name, for example, \"nodes/1\".",
	//       "location": "path",
	//       "pattern": "^nodes/[^/]+/nodes/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1alpha1/{+parent}/nodes",
	//   "response": {
	//     "$ref": "SasPortalListNodesResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *NodesNodesNodesListCall) Pages(ctx context.Context, f func(*SasPortalListNodesResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "sasportal.policies.get":

type PoliciesGetCall struct {
	s                         *Service
	sasportalgetpolicyrequest *SasPortalGetPolicyRequest
	urlParams_                gensupport.URLParams
	ctx_                      context.Context
	header_                   http.Header
}

// Get: Gets the access control policy for a resource. Returns an empty
// policy if the resource exists and does not have a policy set.
// NOTE(review): machine-generated API client code — regenerate rather
// than hand-edit.
func (r *PoliciesService) Get(sasportalgetpolicyrequest *SasPortalGetPolicyRequest) *PoliciesGetCall {
	c := &PoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.sasportalgetpolicyrequest = sasportalgetpolicyrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *PoliciesGetCall) Fields(s ...googleapi.Field) *PoliciesGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *PoliciesGetCall) Context(ctx context.Context) *PoliciesGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *PoliciesGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest POSTs the request body to the custom policies:get verb
// (no path parameters to expand).
func (c *PoliciesGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalgetpolicyrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/policies:get")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.policies.get" call.
// Exactly one of *SasPortalPolicy or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalPolicy.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *PoliciesGetCall) Do(opts ...googleapi.CallOption) (*SasPortalPolicy, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalPolicy{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.",
	//   "flatPath": "v1alpha1/policies:get",
	//   "httpMethod": "POST",
	//   "id": "sasportal.policies.get",
	//   "parameterOrder": [],
	//   "parameters": {},
	//   "path": "v1alpha1/policies:get",
	//   "request": {
	//     "$ref": "SasPortalGetPolicyRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalPolicy"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.policies.set":

type PoliciesSetCall struct {
	s                         *Service
	sasportalsetpolicyrequest *SasPortalSetPolicyRequest
	urlParams_                gensupport.URLParams
	ctx_                      context.Context
	header_                   http.Header
}

// Set: Sets the access control policy on the specified resource.
// Replaces any existing policy.
func (r *PoliciesService) Set(sasportalsetpolicyrequest *SasPortalSetPolicyRequest) *PoliciesSetCall {
	c := &PoliciesSetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.sasportalsetpolicyrequest = sasportalsetpolicyrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *PoliciesSetCall) Fields(s ...googleapi.Field) *PoliciesSetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *PoliciesSetCall) Context(ctx context.Context) *PoliciesSetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *PoliciesSetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest POSTs the request body to the custom policies:set verb.
func (c *PoliciesSetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportalsetpolicyrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/policies:set")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.policies.set" call.
// Exactly one of *SasPortalPolicy or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SasPortalPolicy.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *PoliciesSetCall) Do(opts ...googleapi.CallOption) (*SasPortalPolicy, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalPolicy{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Sets the access control policy on the specified resource. Replaces any existing policy.",
	//   "flatPath": "v1alpha1/policies:set",
	//   "httpMethod": "POST",
	//   "id": "sasportal.policies.set",
	//   "parameterOrder": [],
	//   "parameters": {},
	//   "path": "v1alpha1/policies:set",
	//   "request": {
	//     "$ref": "SasPortalSetPolicyRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalPolicy"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}

// method id "sasportal.policies.test":

type PoliciesTestCall struct {
	s                               *Service
	sasportaltestpermissionsrequest *SasPortalTestPermissionsRequest
	urlParams_                      gensupport.URLParams
	ctx_                            context.Context
	header_                         http.Header
}

// Test: Returns permissions that a caller has on the specified
// resource.
func (r *PoliciesService) Test(sasportaltestpermissionsrequest *SasPortalTestPermissionsRequest) *PoliciesTestCall {
	c := &PoliciesTestCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.sasportaltestpermissionsrequest = sasportaltestpermissionsrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *PoliciesTestCall) Fields(s ...googleapi.Field) *PoliciesTestCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *PoliciesTestCall) Context(ctx context.Context) *PoliciesTestCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *PoliciesTestCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest POSTs the request body to the custom policies:test verb.
func (c *PoliciesTestCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210409")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.sasportaltestpermissionsrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/policies:test")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "sasportal.policies.test" call.
// Exactly one of *SasPortalTestPermissionsResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *SasPortalTestPermissionsResponse.ServerResponse.Header or (if
// a response was returned at all) in error.(*googleapi.Error).Header.
// Use googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *PoliciesTestCall) Do(opts ...googleapi.CallOption) (*SasPortalTestPermissionsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &SasPortalTestPermissionsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns permissions that a caller has on the specified resource.",
	//   "flatPath": "v1alpha1/policies:test",
	//   "httpMethod": "POST",
	//   "id": "sasportal.policies.test",
	//   "parameterOrder": [],
	//   "parameters": {},
	//   "path": "v1alpha1/policies:test",
	//   "request": {
	//     "$ref": "SasPortalTestPermissionsRequest"
	//   },
	//   "response": {
	//     "$ref": "SasPortalTestPermissionsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/userinfo.email"
	//   ]
	// }
}
NewNodesDeploymentsService
player_robot.go
// player_robot.go implements a gomoku (five-in-a-row) AI player built on a
// depth-limited minimax search with move ordering, a position cache, and a
// dedicated forced-win ("checkmate") search.
package main

import (
	"errors"
	"fmt"
	"log"
	"sort"
)

// robotPlayer is the AI player. It embeds the board state (boardStatus,
// providing get/set/count/hash — presumably defined elsewhere in this
// package; verify) and a cache of evaluated positions (boardCache), plus
// the search tuning parameters.
type robotPlayer struct {
	boardStatus
	boardCache
	pColor            playerColor // the colour this robot plays
	maxLevelCount     int         // minimax search depth (plies)
	maxCountEachLevel int         // candidate moves examined per ply
	maxCheckmateCount int         // maximum depth of the forced-win search
}

// newRobotPlayer builds a robot player of the given colour with the
// default search parameters.
func newRobotPlayer(color playerColor) player {
	rp := &robotPlayer{
		boardCache:        make(boardCache),
		pColor:            color,
		maxLevelCount:     6,
		maxCountEachLevel: 16,
		maxCheckmateCount: 12,
	}
	rp.initBoardStatus()
	return rp
}

// color reports the colour this robot plays.
func (r *robotPlayer) color() playerColor { return r.pColor }

// play chooses and plays the robot's next move. Priority order:
//  1. first move of the game → centre of the board;
//  2. a point that completes five in a row for us;
//  3. a point that blocks an opponent four;
//  4. a forced-win ("checkmate") sequence of increasing even depth;
//  5. otherwise a full minimax search.
func (r *robotPlayer) play() (point, error) {
	if r.count == 0 {
		// Empty board: open in the centre.
		p := point{maxLen / 2, maxLen / 2}
		r.set(p, r.pColor)
		return p, nil
	}
	p1, ok := r.findForm5(r.pColor)
	if ok {
		r.set(p1, r.pColor)
		return p1, nil
	}
	p1, ok = r.stop4(r.pColor)
	if ok {
		r.set(p1, r.pColor)
		return p1, nil
	}
	// Iterative deepening of the forced-win search, two plies at a time
	// (our move plus the forced reply).
	// NOTE(review): on success the chosen point is returned WITHOUT
	// calling r.set, unlike every other branch — confirm the caller
	// records the move, otherwise the internal board desynchronises.
	for i := 2; i <= r.maxCheckmateCount; i += 2 {
		if p, ok := r.calculateKill(r.pColor, true, i); ok {
			return p, nil
		}
	}
	result := r.max(r.maxLevelCount, 100000000)
	if result == nil
	// NOTE(review): extraction seam — the body of this nil-guard was lost
	// when the file was chunked; the original presumably returned an
	// error here. Restore from the upstream source before building.
	r.set(result.p, r.pColor)
	return result.p, nil
}

// calculateKill searches for a forcing sequence: it tries every empty
// cell for `color`, keeps only moves that do not leave the opponent an
// open four (and, on aggressive turns, that create a four for us), and
// recurses with the colours and the aggressive flag swapped. A move wins
// if the opponent then has no reply. The board is restored before every
// return.
// NOTE(review): `step` is decremented but never tested in the visible
// code — confirm the recursion terminates other than by running out of
// qualifying moves.
func (r *robotPlayer) calculateKill(color playerColor, aggressive bool, step int) (point, bool) {
	p := point{}
	for i := 0; i < maxLen; i++ {
		for j := 0; j < maxLen; j++ {
			p.x, p.y = j, i
			if r.get(p) == 0 {
				r.set(p, color) // try the move
				if !r.exists4(color.conversion()) && (!aggressive || r.exists4(color)) {
					if _, ok := r.calculateKill(color.conversion(), !aggressive, step-1); !ok {
						r.set(p, 0) // undo before returning the winning point
						return p, true
					}
				}
				r.set(p, 0) // undo
			}
		}
	}
	return p, false
}

// stop4 looks for an empty cell adjacent to four opponent stones in a
// row (in any of the four axis directions) — i.e. a cell we must play to
// block an imminent opponent five.
func (r *robotPlayer) stop4(color playerColor) (point, bool) {
	p := point{}
	for i := 0; i < maxLen; i++ {
		for j := 0; j < maxLen; j++ {
			p.x, p.y = j, i
			if r.get(p) == colorEmpty {
				for _, dir := range fourDirections {
					leftCount, rightCount := 0, 0
					// Count contiguous opponent stones on either side of p.
					for k := -1; k >= -4; k-- {
						if p1 := p.move(dir, k); p1.checkRange() && r.get(p1) == color.conversion() {
							leftCount++
						} else {
							break
						}
					}
					for k := 1; k <= 4; k++ {
						if p1 := p.move(dir, k); p1.checkRange() && r.get(p1) == color.conversion() {
							rightCount++
						} else {
							break
						}
					}
					if leftCount+rightCount >= 4 {
						return p, true
					}
				}
			}
		}
	}
	return p, false
}

// exists4 reports whether `color` has a "four": a window of five cells
// in some axis direction holding exactly four stones of that colour and
// one empty cell (so one move completes five).
func (r *robotPlayer) exists4(color playerColor) bool {
	p := point{}
	for i := 0; i < maxLen; i++ {
		for j := 0; j < maxLen; j++ {
			p.x, p.y = j, i
			if r.get(p) == color || r.get(p) == colorEmpty {
				for _, dir := range fourDirections {
					count0, count1 := 0, 0
					for k := 0; k <= 4; k++ {
						pk := p.move(dir, k)
						if pk.checkRange() {
							kColor := r.get(pk)
							if kColor == 0 {
								count0++
							} else if kColor == color {
								count1++
							}
						}
					}
					if count0 == 1 && count1 == 4 {
						return true
					}
				}
			}
		}
	}
	return false
}

// findForm5 looks for an empty cell that, together with the contiguous
// stones of `color` on both sides, makes at least five in a row — i.e.
// an immediately winning move for that colour.
func (r *robotPlayer) findForm5(color playerColor) (point, bool) {
	p := point{}
	for i := 0; i < maxLen; i++ {
		for j := 0; j < maxLen; j++ {
			p.x, p.y = j, i
			if r.get(p) == colorEmpty {
				for _, dir := range fourDirections {
					leftCount, rightCount := 0, 0
					for k := -1; k >= -4; k-- {
						if pk := p.move(dir, k); pk.checkRange() && r.get(pk) == color {
							leftCount++
						} else {
							break
						}
					}
					for k := 1; k <= 4; k++ {
						if pk := p.move(dir, k); pk.checkRange() && r.get(pk) == color {
							rightCount++
						} else {
							break
						}
					}
					if leftCount+rightCount >= 4 {
						return p, true
					}
				}
			}
		}
	}
	return p, false
}

// checkForm5ByPoint reports whether placing `color` at the empty cell p
// would form exactly five in a row. The trial stone is removed before
// returning.
func (r *robotPlayer) checkForm5ByPoint(p point, color playerColor) bool {
	if r.get(p) != 0 {
		return false
	}
	r.set(p, color) // place trial stone
	count := 0
	for _, dir := range fourDirections {
		count = 0
		for i := -4; i <= 4; i++ {
			p2 := p.move(dir, i)
			if p2.checkRange() && r.get(p2) == color {
				count++
			} else {
				count = 0 // run broken
			}
			// Early exit: a run through p is no longer reachable, or found.
			if count <= i || count == 5 {
				break
			}
		}
		if count == 5 {
			break
		}
	}
	r.set(p, colorEmpty) // undo trial stone
	return count == 5
}

// display records the opponent's move at p on the robot's internal
// board. It returns an error if the cell is already occupied.
// NOTE(review): errors.New(fmt.Sprintf(...)) is the non-idiomatic form
// of fmt.Errorf(...) (staticcheck S1028) — worth cleaning up upstream.
func (r *robotPlayer) display(p point) error {
	if r.get(p) != 0 {
		return errors.New(fmt.Sprintf("illegal argument: %s%s", p, r.get(p)))
	}
	r.set(p, r.pColor.conversion())
	return nil
}

// max is the maximising half of the minimax search: it picks the move
// for r.pColor that maximises the board evaluation, pruning against the
// caller's best-so-far (foundminVal) and caching results by board hash.
// Returns nil when no candidate move exists.
func (r *robotPlayer) max(step int, foundminVal int) *pointAndValue {
	if v := r.getFromCache(r.hash, step); v != nil {
		return v
	}
	// Collect and order candidate moves (empty cells near existing stones).
	var queue pointAndValueSlice
	p := point{}
	for i := 0; i < maxLen; i++ {
		for j := 0; j < maxLen; j++ {
			p.x, p.y = j, i
			if r.get(p) == 0 && r.isNeighbor(p) {
				evathis := r.evaluatePoint(p, r.pColor)
				queue = append(queue, &pointAndValue{p, evathis})
			}
		}
	}
	sort.Sort(queue)
	if step == 1 {
		// Leaf ply: score the single best-ordered move.
		if len(queue) == 0 {
			log.Println("algorithm error")
			return nil
		}
		p = queue[0].p
		r.setIfEmpty(p, r.pColor)
		val := r.evaluateBoard(r.pColor) - r.evaluateBoard(r.pColor.conversion())
		r.set(p, colorEmpty)
		result := &pointAndValue{p, val}
		r.putIntoCache(r.hash, step, result)
		return result
	}
	maxPoint := point{}
	maxVal := -100000000
	i := 0
	for _, obj := range queue {
		i++
		if i > r.maxCountEachLevel { // beam limit per ply
			break
		}
		p = obj.p
		r.set(p, r.pColor)
		boardVal := r.evaluateBoard(r.pColor) - r.evaluateBoard(r.pColor.conversion())
		if boardVal > 800000 {
			// Decisive position: stop searching this subtree.
			r.set(p, 0)
			result := &pointAndValue{p, boardVal}
			r.putIntoCache(r.hash, step, result)
			return result
		}
		// NOTE(review): r.min may return nil (empty queue at the leaf),
		// which would panic here — confirm this is unreachable in practice.
		evathis := r.min(step-1, maxVal).value // minimax
		if evathis >= foundminVal {
			// Beta cutoff: caller already has a line at least this good.
			r.set(p, 0)
			result := &pointAndValue{p, evathis}
			r.putIntoCache(r.hash, step, result)
			return result
		}
		if evathis > maxVal || evathis == maxVal && p.nearMidThan(maxPoint) {
			// Prefer the higher score; break ties toward the board centre.
			maxVal = evathis
			maxPoint = p
		}
		r.set(p, 0)
	}
	if maxVal < -100000000+1 == false && maxVal < -99999999 {
		return nil
	}
	result := &pointAndValue{maxPoint, maxVal}
	r.putIntoCache(r.hash, step, result)
	return result
}

// min is the minimising half of the minimax search: it picks the reply
// for the opponent that minimises the evaluation, pruning against the
// caller's best-so-far (foundmaxVal). Returns nil when no candidate
// move exists.
func (r *robotPlayer) min(step int, foundmaxVal int) *pointAndValue {
	if v := r.getFromCache(r.hash, step); v != nil {
		return v
	}
	var queue pointAndValueSlice
	p := point{}
	for i := 0; i < maxLen; i++ {
		for j := 0; j < maxLen; j++ {
			p.x, p.y = j, i
			if r.get(p) == 0 && r.isNeighbor(p) {
				evathis := r.evaluatePoint(p, r.pColor.conversion())
				queue = append(queue, &pointAndValue{p, evathis})
			}
		}
	}
	sort.Sort(queue)
	if step == 1 {
		if len(queue) == 0 {
			log.Println("algorithm error")
			return nil
		}
		p := queue[0].p
		r.setIfEmpty(p, r.pColor.conversion())
		val := r.evaluateBoard(r.pColor) - r.evaluateBoard(r.pColor.conversion())
		r.set(p, 0)
		result := &pointAndValue{p, val}
		r.putIntoCache(r.hash, step, result)
		return result
	}
	var minPoint point
	minVal := 100000000
	i := 0
	for _, obj := range queue {
		i++
		if i > r.maxCountEachLevel { // beam limit per ply
			break
		}
		p = obj.p
		r.set(p, r.pColor.conversion())
		boardVal := r.evaluateBoard(r.pColor) - r.evaluateBoard(r.pColor.conversion())
		if boardVal < -800000 {
			// Decisive for the opponent: stop searching this subtree.
			r.set(p, 0)
			result := &pointAndValue{p, boardVal}
			r.putIntoCache(r.hash, step, result)
			return result
		}
		evathis := r.max(step-1, minVal).value // minimax
		if evathis <= foundmaxVal {
			// Alpha cutoff.
			r.set(p, 0)
			result := &pointAndValue{p, evathis}
			r.putIntoCache(r.hash, step, result)
			return result
		}
		if evathis < minVal || evathis == minVal && p.nearMidThan(minPoint) {
			minVal = evathis
			minPoint = p
		}
		r.set(p, 0)
	}
	if minVal > 99999999 {
		return nil
	}
	result := &pointAndValue{minPoint, minVal}
	r.putIntoCache(r.hash, step, result)
	return result
}

// evaluatePoint scores an empty cell for move ordering by summing the
// pattern value it creates for both colours.
func (r *robotPlayer) evaluatePoint(p point, color playerColor) int {
	return r.evaluatePoint2(p, color, colorBlack) + r.evaluatePoint2(p, color, colorWhite)
}

// evaluatePoint2 scores the empty cell p for stones of colour `plyer`
// (pattern owner) from the perspective of `me` (the searching colour).
// Pattern notation: * is the cell under evaluation, 1 a `plyer` stone,
// 2 an opponent stone, 0 any other empty cell.
func (r *robotPlayer) evaluatePoint2(p point, me playerColor, plyer playerColor) (value int) {
	numoftwo := 0
	// getLine reads the stone j steps along dir from p; -1 means off-board.
	getLine := func(p point, dir direction, j int) playerColor {
		p2 := p.move(dir, j)
		if p2.checkRange() {
			return r.get(p2)
		}
		return -1
	}
	for _, dir := range eightDirections { // 8 directions
		// Live four 01111*.
		if getLine(p, dir, -1) == plyer && getLine(p, dir, -2) == plyer && getLine(p, dir, -3) == plyer && getLine(p, dir, -4) == plyer && getLine(p, dir, -5) == 0 {
			value += 300000
			if me != plyer {
				value -= 500
			}
			continue
		}
		// Dead four A 21111*.
		if getLine(p, dir, -1) == plyer && getLine(p, dir, -2) == plyer && getLine(p, dir, -3) == plyer && getLine(p, dir, -4) == plyer && (getLine(p, dir, -5) == plyer.conversion() || getLine(p, dir, -5) == -1) {
			value += 250000
			if me != plyer {
				value -= 500
			}
			continue
		}
		// Dead four B 111*1.
		if getLine(p, dir, -1) == plyer && getLine(p, dir, -2) == plyer && getLine(p, dir, -3) == plyer && getLine(p, dir, 1) == plyer {
			value += 240000
			if me != plyer {
				value -= 500
			}
			continue
		}
		// Dead four C 11*11.
		if getLine(p, dir, -1) == plyer && getLine(p, dir, -2) == plyer && getLine(p, dir, 1) == plyer && getLine(p, dir, 2) == plyer {
			value += 230000
			if me != plyer {
				value -= 500
			}
			continue
		}
		// Live three, near position 111*0.
		if getLine(p, dir, -1) == plyer && getLine(p, dir, -2) == plyer && getLine(p, dir, -3) == plyer {
			if getLine(p, dir, 1) == 0 {
				value += 1450
				if getLine(p, dir, -4) == 0 {
					value += 6000
					if me != plyer {
						value -= 300
					}
				}
			}
			if (getLine(p, dir, 1) == plyer.conversion() || getLine(p, dir, 1) == -1) && getLine(p, dir, -4) == 0 {
				value += 500
			}
			if (getLine(p, dir, -4) == plyer.conversion() || getLine(p, dir, -4) == -1) && getLine(p, dir, 1) == 0 {
				value += 500
			}
			continue
		}
		// Live three, far position 1110*.
		if getLine(p, dir, -1) == 0 && getLine(p, dir, -2) == plyer && getLine(p, dir, -3) == plyer && getLine(p, dir, -4) == plyer {
			value += 350
			continue
		}
		// Dead three 11*1.
		if getLine(p, dir, -1) == plyer && getLine(p, dir, -2) == plyer && getLine(p, dir, 1) == plyer {
			value += 700
			if getLine(p, dir, -3) == 0 && getLine(p, dir, 2) == 0 {
				value += 6700
				continue
			}
			if (getLine(p, dir, -3) == plyer.conversion() || getLine(p, dir, -3) == -1) && (getLine(p, dir, 2) == plyer.conversion() || getLine(p, dir, 2) == -1) {
				value -= 700
				continue
			} else {
				value += 800
				continue
			}
		}
		// Live twos (each pattern is seen twice over 8 directions, hence
		// counted double and halved below).
		if getLine(p, dir, -1) == plyer && getLine(p, dir, -2) == plyer && getLine(p, dir, -3) == 0 && getLine(p, dir, 1) == 0 {
			if getLine(p, dir, 2) == 0 || getLine(p, dir, -4) == 0 {
				numoftwo += 2
			} else {
				value += 250
			}
		}
		if getLine(p, dir, -1) == plyer && getLine(p, dir, -2) == 0 && getLine(p, dir, 2) == plyer && getLine(p, dir, 1) == 0 && getLine(p, dir, 3) == 0 {
			numoftwo += 2
		}
		if getLine(p, dir, -1) == 0 && getLine(p, dir, 4) == 0 && getLine(p, dir, 3) == plyer && (getLine(p, dir, 2) == plyer && getLine(p, dir, 1) == 0 || getLine(p, dir, 1) == plyer && getLine(p, dir, 2) == 0) {
			numoftwo += 2
		}
		if getLine(p, dir, -1) == plyer && getLine(p, dir, 1) == plyer && getLine(p, dir, -2) == 0 && getLine(p, dir, 2) == 0 {
			if getLine(p, dir, 3) == 0 || getLine(p, dir, -3) == 0 {
				numoftwo++
			} else {
				value += 125
			}
		}
		// Remaining scattered stones: weight stones in every 5-window
		// through p by distance, zeroing windows blocked by the opponent
		// or the board edge.
		numOfplyer := 0
		for k := -4; k <= 0; k++ { // ++++* +++*+ ++*++ +*+++ *++++
			temp := 0
			for l := 0; l <= 4; l++ {
				if getLine(p, dir, k+l) == plyer {
					temp += 5 - abs(k+l)
				} else if getLine(p, dir, k+l) == plyer.conversion() || getLine(p, dir, k+l) == -1 {
					temp = 0
					break
				}
			}
			numOfplyer += temp
		}
		value += numOfplyer * 5
	}
	numoftwo /= 2 // undo the double counting
	if numoftwo >= 2 {
		value += 3000
		if me != plyer {
			value -= 100
		}
	} else if numoftwo == 1 {
		value += 2725
		if me != plyer {
			value -= 10
		}
	}
	return
}

// evaluateBoard scores the whole board for `color` by pattern-matching a
// 9-cell window (colors[0..8], centred at index 4) around every stone of
// that colour in all eight directions. In the pattern comments, A is a
// `color` stone and ? an empty cell; several symmetric patterns are
// divided by 2 because they are matched from both ends.
func (r *robotPlayer) evaluateBoard(color playerColor) (values int) {
	p := point{}
	for i := 0; i < maxLen; i++ {
		for j := 0; j < maxLen; j++ {
			p.x, p.y = j, i
			if r.get(p) != color {
				continue
			}
			for _, dir := range eightDirections {
				// Snapshot the 9-cell line through p; -1 marks off-board.
				colors := make([]playerColor, 9)
				for k := 0; k < 9; k++ {
					pk := p.move(dir, k-4)
					if pk.checkRange() {
						colors[k] = r.get(pk)
					} else {
						colors[k] = playerColor(-1)
					}
				}
				if colors[5] == color && colors[6] == color && colors[7] == color && colors[8] == color {
					values += 1000000 // five in a row
					continue
				}
				if colors[5] == color && colors[6] == color && colors[7] == color && colors[3] == 0 {
					if colors[8] == 0 { //?AAAA?
						values += 300000 / 2
					} else if colors[8] != color { //AAAA?
						values += 25000
					}
					continue
				}
				if colors[5] == color && colors[6] == color {
					if colors[7] == 0 && colors[8] == color { //AAA?A
						values += 30000
						continue
					}
					if colors[3] == 0 && colors[7] == 0 {
						if colors[2] == 0 && colors[8] != color || colors[8] == 0 && colors[2] != color { //??AAA??
							values += 22000 / 2
						} else if colors[2] != color && colors[2] != 0 && colors[8] != color && colors[8] != 0 { //?AAA?
							values += 500 / 2
						}
						continue
					}
					if colors[3] != 0 && colors[3] != color && colors[7] == 0 && colors[8] == 0 { //AAA??
						values += 500
						continue
					}
				}
				if colors[5] == color && colors[6] == 0 && colors[7] == color && colors[8] == color { //AA?AA
					values += 26000 / 2
					continue
				}
				if colors[5] == 0 && colors[6] == color && colors[7] == color {
					if colors[3] == 0 && colors[8] == 0 { //?A?AA?
						values += 22000
					} else if (colors[3] != 0 && colors[3] != color && colors[8] == 0) || (colors[8] != 0 && colors[8] != color && colors[3] == 0) { //A?AA? ?A?AA
						values += 800
					}
					continue
				}
				if colors[5] == 0 && colors[8] == color {
					if colors[6] == 0 && colors[7] == color { //A??AA
						values += 600
					} else if colors[6] == color && colors[7] == 0 { //A?A?A
						values += 550 / 2
					}
					continue
				}
				if colors[5] == color {
					if colors[3] == 0 && colors[6] == 0 {
						if colors[1] == 0 && colors[2] == 0 && colors[7] != 0 && colors[7] != color || colors[8] == 0 && colors[7] == 0 && colors[2] != 0 && colors[2] != color { //??AA??
							values += 650 / 2
						} else if colors[2] != 0 && colors[2] != color && colors[7] == 0 && colors[8] != 0 && colors[8] != color { //?AA??
							values += 150
						}
					} else if colors[3] != 0 && colors[3] != color && colors[6] == 0 && colors[7] == 0 && colors[8] == 0 { //AA???
						values += 150
					}
					continue
				}
				if colors[5] == 0 && colors[6] == color {
					if colors[3] == 0 && colors[7] == 0 {
						if colors[2] != 0 && colors[2] != color && colors[8] == 0 || colors[2] == 0 && colors[8] != 0 && colors[8] != color { //??A?A??
							values += 250 / 2
						}
						if colors[2] != 0 && colors[2] != color && colors[8] != 0 && colors[8] != color { //?A?A?
							values += 150 / 2
						}
					} else if colors[3] != 0 && colors[3] != color && colors[7] == 0 && colors[8] == 0 { //A?A??
						values += 150
					}
					continue
				}
				if colors[5] == 0 && colors[6] == 0 && colors[7] == color {
					if colors[3] == 0 && colors[8] == 0 { //?A??A?
						values += 200 / 2
						continue
					}
					if colors[3] != 0 && colors[3] != color && colors[8] == 0 { //A??A?
						// Need one cell beyond the 9-window here.
						p5 := p.move(dir, 5)
						if p5.checkRange() {
							color5 := r.get(p5)
							if color5 == 0 {
								values += 200
							} else if color5 != color {
								values += 150
							}
						}
					}
					continue
				}
			}
		}
	}
	return values
}

// pointAndValue pairs a candidate move with its evaluation score.
type pointAndValue struct {
	p     point
	value int
}

// pointAndValueSlice sorts candidate moves by descending score for
// move ordering (implements sort.Interface).
type pointAndValueSlice []*pointAndValue

func (s pointAndValueSlice) Len() int           { return len(s) }
func (s pointAndValueSlice) Less(i, j int) bool { return s[i].value > s[j].value }
func (s pointAndValueSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
{ return point{}, errors.New("algorithm error") }
Handler.py
#!/usr/bin/env python # -*- coding: UTF-8 -*- #------------------------------------------------------------------------------- """pyzombie HTTP RESTful resource handler.""" __author__ = ('Lance Finn Helsten',) __version__ = '1.0.1' __copyright__ = """Copyright 2009 Lance Finn Helsten ([email protected])""" __license__ = """ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __docformat__ = "reStructuredText en" __all__ = ['Handler'] import sys import os from datetime import datetime import mimetypes import hashlib import re import cgi import cgitb import http.client from .ZombieConfig import config, datadir from .Executable import Executable #cgitb.enable() ### ### TODO ### ### Pay attention to If-Modified-Since to allow return of 304 Not Modified ### Pay attention to If-None-Match to allow return of 304 Not Modified ### Pay attention to If-Unmodified-Since ### Pay attention to If-Modified-Since CHUNK_SIZE = 256 FLUSHED = "Flushed" class Handler: """Holds all the information necessary to handle a single resource dispatch. Properties ---------- executable The Executable object for this handler. In rare cases no executable can be determined so this will return None. 
""" @classmethod def initdispatch(cls, regex, allow, help): cls.regex = re.compile(regex) cls.allow = allow cls.help = help return cls @classmethod def match(cls, path): """Check to see if the path is recognized by the dispatch handler, if so then return a dictionary of recognized parts, otherwise return None.""" ret = None mo = cls.regex.match(path) if mo != None: ret = mo.groupdict() return ret def __init__(self, req, urlargs): self.req = req self.urlargs = urlargs self.content = "Single" self.nocache = False self.__status = None self.headers = {} self.lines = [] @property def status(self): return self.__status @status.setter def status(self, value): self.__status = value @property def startstamp(self): return self.req.server.stamp @property def startstamprfc850(self): return self.req.date_time_string() @property def datadir(self): return datadir() @property def executable(self, mediatype=None): if not hasattr(self, "_Handler__executable"): self.initexecutable() return self.__executable @property def accept(self): """Return an ordered set of media types that will be accepted.""" if not hasattr(self, "acceptset"): astr = self.req.headers["Accept"] if astr is None: astr = "text/html" self.acceptset = self.__parseq(astr) self.acceptset.append(None) return self.acceptset @property def acceptlanguage(self): """Return an ordered set of languages that will be accepted.""" if not hasattr(self, "acceptlangset"): astr = self.req.headers["Accept-Language"] if astr is None: astr = "en" self.acceptlangset = self.__parseq(astr) self.acceptlangset.append(None) return self.acceptlangset @property def acceptencoding(self): """Return an ordered set of langauges that will be accepted.""" if not hasattr(self, "acceptencset"): astr = self.req.headers["Accept-Encoding"] if astr is None: astr = "" self.acceptencset = self.__parseq(astr) self.acceptencset.append(None) return self.acceptencset def __parseq(self, astr): qre = re.compile(r"([a-zA-Z*]+/[a-zA-Z*]+)(\s*;\s*q=(\d+(\.\d+))?)?") 
astr = astr.split(",") aset = ["DUMMY"] weight = [0.0] for a in astr: q = 1.0 m = qre.match(a.strip()) if m: a = m.group(1) if m.group(3): q = float(m.group(3)) for i, w in enumerate(weight): if q > w: aset.insert(i, a) weight.insert(i, q) break return aset[:-1] def initexecutable(self, mediatype=None): """This will initialize the executable property with a given media type. Generally using the executable property directly will give correct results. This is really only used when POST of a new exectuable occurs.""" if hasattr(self, "_Handler__executable"): raise AttributeError("Executable property is already initialized.") if 'execname' in self.urlargs: name = self.urlargs['execname'] else: name = Executable.createname() self.__executable = Executable.getcached(name, mediatype) def serverurl(self, path): """Given a path to a resource create a full URL to that resource. Parameters ---------- path The relative path on the server to the resource. Return ------ The URL that can be given to this server to find the given resource. """ return "http://{0}:{1}/{2}".format( self.req.server.server_name, self.req.server.server_port, path) def rfile_safe(self):
def multipart(self): ctype, pdict = cgi.parse_header(self.req.headers['Content-Type']) if ctype != 'multipart/form-data': self.error(http.client.UNSUPPORTED_MEDIA_TYPE) return None fp = self.rfile_safe() fs = cgi.FieldStorage(fp=fp, headers=self.req.headers, environ={'REQUEST_METHOD':'POST'}, strict_parsing=True) return fs def readline(self): """Read a single line from the input stream in decoded format.""" pass def writeline(self, line): """Write a single line of text to the output stream.""" self.lines.append(line) def writelines(self, lines): """Write a string one line at a time to the output stream.""" for l in lines.splitlines(): self.writeline(l) def writefile(self, path): """Read and then write the file from the given path to the output stream. This will write all the headers before the file. If there is an error reading the file then the appropriate HTTP error code will be sent. This is meant for static files. Dynamic files should use writeline or writelines to operate. Parameters ---------- path The normalized path to the file. """ if os.path.isfile(path): mediatype, enc = mimetypes.guess_type(path) self.writefp(open(path, "rb"), mediatype=mediatype, enc=enc) else: self.error(http.client.NOT_FOUND) def writefp(self, fp, mediatype="text/plain", enc=None, chunked=None): """Read from the given file object and write the data to the output stream. If this is chunked then this will not return until the input file object is closed. Parameters ---------- fp The file type object to read from. chunked If not ``None`` then the data should be sent in a chunked manner, and the value should be a function that returns a boolean value to indicate all data has been sent. The default is no chunked. 
""" self.req.send_response(http.client.OK) self.req.send_header("Cache-Control", "public max-age={0}".format(self.req.server.maxagestatic)) self.req.send_header("Last-Modified", self.req.date_time_string()) if mediatype == None: self.req.send_header("Content-Type", "application/octet-stream") else: if mediatype in ["text/plain", "text/html"]: mediatype = "{0};UTF-8".format(mediatype) self.req.send_header("Content-Type", mediatype) if enc != None: self.req.send_header("Content-Encoding", enc) if chunked is not None: self.__etag_init() self.content = "Chunked" self.req.send_header("Transfer-Encoding", "chunked") self.req.end_headers() length = 0 done = False while not done: data = fp.read(CHUNK_SIZE) while not data and not done: data = fp.read(CHUNK_SIZE) done = chunked() if data: datalen = len(data) length = length + datalen self.__etag_feed(data) self.req.wfile.write("{0:x}".format(datalen).encode("UTF-8")) self.req.wfile.write(os.linesep.encode("UTF-8")) if isinstance(data, str): self.req.wfile.write(data.encode("UTF-8")) elif isinstance(data, bytes): self.req.wfile.write(data) self.req.wfile.write(os.linesep.encode("UTF-8")) self.req.wfile.write(b"0") self.req.wfile.write(os.linesep.encode("UTF-8")) self.req.send_header("Cache-Control", "public max-age={0}".format(self.req.server.maxagedynamic)) self.req.send_header("ETag", self.__etag_value()) self.req.wfile.write(os.linesep.encode("UTF-8")) self.content = FLUSHED else: data = fp.read() self.req.send_header("ETag", self.etag(data)) self.req.send_header("Content-Length", len(data)) self.req.end_headers() self.req.wfile.write(data) self.content = FLUSHED def error(self, code, message=None): self.req.send_error(code, message=message) self.content = FLUSHED def flush(self): """Flush the headers if they have not been written and all the lines that have been written to the http output stream.""" if self.content == FLUSHED: return self.lines.append("") buf = os.linesep.join(self.lines).encode("UTF-8") self.lines = [] 
if not self.nocache: if "Cache-Control" not in self.headers: self.headers["Cache-Control"] = "public max-age={0}".format(self.req.server.maxagedynamic) if "ETag" not in self.headers: self.headers["ETag"] = self.etag(buf) if self.content in ["Headers", "Single", "Chunked"]: self.req.send_response(self.status) for k in self.headers: self.req.send_header(k, self.headers[k]) if self.content == "Headers": self.req.end_headers() self.content = FLUSHED elif self.content == "Single": self.req.send_header("Content-Length", len(buf)) self.req.end_headers() self.req.wfile.write(buf) self.content = FLUSHED elif self.content == "Chunked": pass def etag(self, data): """Build an ETag representation for the data associated with the given name.""" self.__etag_init() self.__etag_feed(data) return self.__etag_value() def __etag_init(self): self.__etag = hashlib.md5() def __etag_feed(self, data): if isinstance(data, str): self.__etag.update(data.encode("UTF-8")) elif isinstance(data, bytes): self.__etag.update(data) else: self.__etag.update(str(data).encode("UTF-8")) def __etag_value(self): return self.__etag.hexdigest() def __getitem__(self, key): return self.headers[key] def __setitem__(self, key, value): self.headers[key] = value class HttpServerFP(): """This will wrap the http.server request rfile so an EOF will be returned when reading from the rfile. That way the Content-Length is always handled correctly. This will also convert the binary stream into a character stream. """ def __init__(self, req): self.req = req self.clen = int(self.req.headers['Content-Length']) self.rfile = self.req.rfile def read(self, size=-1): if size < 0: size = self.clen if size > self.clen: size = self.clen ret = '' if size > 0: ret = self.rfile.read(size) self.clen = self.clen - len(ret) ret = str(ret, 'UTF-8') return ret
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$") if sys.version_info >= (3, 2): return self.req.rfile else: return HttpServerFP(self.req)
main.rs
mod collector; mod finding; mod out; mod repo; mod src; mod tool; mod utils; use collector::CollectorTask; use indicatif::{ProgressBar, ProgressStyle}; use log::info; use quicli::prelude::*; use structopt::StructOpt; use utils::setup_logger; /// Run the command line interface of the `collector`. fn main() -> CliResult { setup_logger().unwrap_or_else(|err| eprintln!("Failed to initialize logger: {}", err)); info!("Starting the collector"); let args = Cli::from_args(); let repo_file = read_file(&args.input_path)?; let repos: Vec<&str> = repo_file .lines() .skip(args.repo_skips) .take(args.repo_count) .collect(); let mut tasks: Vec<CollectorTask> = Vec::new(); if args.clone { tasks.push(CollectorTask::CloneRepos); } if args.metrics { tasks.push(CollectorTask::CollectMetrics); }
tasks.push(CollectorTask::DeleteTmp); } let sty = ProgressStyle::default_bar() .template( "[{elapsed_precise}] [{wide_bar:.cyan/blue}] Task {pos:>2}/{len:2} Batch \ {msg:>2} ETA {eta:>1}", ) .progress_chars("#>-"); let progress_bar = ProgressBar::new(repos.len() as u64 * tasks.len() as u64) .with_style(sty) .with_message("0/?"); progress_bar.enable_steady_tick(1000); let batches = repos.chunks(args.batch_size); let num_batches = batches.len(); for (i, batch) in batches.into_iter().enumerate() { progress_bar.set_message(format!("{}/{}", i + 1, num_batches)); for task in &tasks { collector::collect(batch.to_vec(), task, &progress_bar) .unwrap_or_else(|err| error!("Failed to run task on batch: {}", err)); } } progress_bar.finish(); info!("Done with {} repositories", repos.len()); Ok(()) } /// This struct contains all arguments which can be passed to the `collector` /// CLI. They can be used to define a pipeline with each stage being one /// optional flag. The base path of the mentioned output folders can be /// specified using the `DATA_PATH` environment variable. /// /// # Examples /// /// ```sh /// ./collector -n 3 -s 3 -c -m -f -d /// ``` /// /// This command runs on the 4th to 6th repository on the `awesome-rust` list /// and **c**lones them, collects the **m**etrics, **f**ilters them and /// **d**eletes the temporary files afterwards. #[derive(Debug, StructOpt)] struct Cli { /// Path to use for the links to the GitHub repositories. By default, the /// file contains links from the `awesome-rust` list. #[structopt( long = "input_path", short = "p", default_value = "../data/collector/in/awesome-rust.txt" )] input_path: String, /// Number of repositories to be cloned from the list. #[structopt(long = "repo_count", short = "n", default_value = "1")] repo_count: usize, /// Number of repositories to be skipped on the list. 
#[structopt(long = "repo_skips", short = "s", default_value = "0")] repo_skips: usize, /// Number of repositories per batch #[structopt(long = "batch_size", short = "b", default_value = "16")] batch_size: usize, /// Option for cloning the repositories into the `tmp` folder. If `-d` /// is omitted, this flag only needs to be set once. #[structopt(long = "clone_repos", short = "c")] clone: bool, /// Option to collect metrics. The output of this step can be found in the /// `out` directory. #[structopt(long = "collect_metrics", short = "m")] metrics: bool, /// Option to filter the metrics. This reorganizes and filters the raw /// outputs from the `out` directory and saves the results in `res`. #[structopt(long = "filter_metrics", short = "f")] filter: bool, /// Option to delete the temporary files immediately after the metrics are /// collected and filtered. This can be useful in order to save disk space /// when `-n` is large. #[structopt(long = "delete_tmp", short = "d")] delete: bool, }
if args.filter { tasks.push(CollectorTask::FilterMetrics); } if args.delete {
querier_test.go
package keeper_test

import (
	"fmt"
	"strings"
	"testing"

	"github.com/KuChainNetwork/kuchain/chain/constants"
	chainType "github.com/KuChainNetwork/kuchain/chain/types"
	"github.com/KuChainNetwork/kuchain/x/asset"
	assettypes "github.com/KuChainNetwork/kuchain/x/asset/types"
	keep "github.com/KuChainNetwork/kuchain/x/supply/keeper"
	"github.com/KuChainNetwork/kuchain/x/supply/types"
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/stretchr/testify/require"
	abci "github.com/tendermint/tendermint/abci/types"
)

// initCoin creates the asset denom of `coin` under the chain master account,
// issues `coin.Amount` of it, and transfers the issued coins to `id`.
func initCoin(t *testing.T, ctx sdk.Context, assetKeeper asset.Keeper, coin chainType.Coin, id chainType.AccountID) {
	intNum, _ := sdk.NewIntFromString("80000000000000000000000")
	intMaxNum, _ := sdk.NewIntFromString("100000000000000000000000")

	TestMaster := constants.ChainMainNameStr
	MasterName, _ := chainType.NewName(TestMaster)
	Master := chainType.NewAccountIDFromName(MasterName)

	// Denoms are "<creator>/<symbol>"; the symbol part names the asset.
	Symbol := strings.Split(coin.Denom, "/")
	SymbolName, _ := chainType.NewName(Symbol[1])

	assetKeeper.Create(ctx, MasterName, SymbolName,
		assettypes.NewCoin(coin.Denom, intNum), true, true, 0,
		assettypes.NewCoin(coin.Denom, intMaxNum), []byte("create"))
	assetKeeper.Issue(ctx, MasterName, SymbolName, assettypes.NewCoin(coin.Denom, coin.Amount))

	Coins := chainType.NewCoins(chainType.NewCoin(coin.Denom, coin.Amount))
	err := assetKeeper.Transfer(ctx, Master, id, Coins)
	require.Nil(t, err)
}

// fInitCoins initializes every coin in `coins` for account `id`.
func fInitCoins(t *testing.T, ctx sdk.Context, ask asset.Keeper, coins chainType.Coins, id chainType.AccountID) {
	for _, c := range coins {
		initCoin(t, ctx, ask, c, id)
	}
}

func TestNewQuerier(t *testing.T) {
	app, ctx := createTestApp(false)
	keeper := app.SupplyKeeper()
	cdc := app.Codec()

	supplyCoins := chainType.NewCoins(
		chainType.NewCoin(constants.DefaultBondDenom, sdk.NewInt(100)),
		chainType.NewCoin(constants.ChainMainNameStr+"/"+"photon", sdk.NewInt(50)),
		chainType.NewCoin(constants.ChainMainNameStr+"/"+"atom", sdk.NewInt(2000)),
		chainType.NewCoin(constants.ChainMainNameStr+"/"+"btc", sdk.NewInt(21000000)),
	)

	supplyAcc := keeper.GetModuleAccount(ctx, types.ModuleName).GetID()
	fInitCoins(t, ctx, *app.AssetKeeper(), supplyCoins, supplyAcc)

	query := abci.RequestQuery{
		Path: "",
		Data: []byte{},
	}

	// NOTE(review): this construction was commented out although `querier`
	// is used below, which cannot compile; re-enabled it.
	querier := keep.NewQuerier(*keeper)

	// An unknown route must be rejected.
	bz, err := querier(ctx, []string{"other"}, query)
	require.Error(t, err)
	require.Nil(t, bz)

	queryTotalSupplyParams := types.NewQueryTotalSupplyParams(1, 20)
	bz, errRes := cdc.MarshalJSON(queryTotalSupplyParams)
	require.Nil(t, errRes)

	query.Path = fmt.Sprintf("/custom/supply/%s", types.QueryTotalSupply)
	query.Data = bz

	_, err = querier(ctx, []string{types.QueryTotalSupply}, query)
	require.Nil(t, err)

	querySupplyParams := types.NewQuerySupplyOfParams(constants.DefaultBondDenom)
	bz, errRes = cdc.MarshalJSON(querySupplyParams)
	require.Nil(t, errRes)

	query.Path = fmt.Sprintf("/custom/supply/%s", types.QuerySupplyOf)
	query.Data = bz

	_, err = querier(ctx, []string{types.QuerySupplyOf}, query)
	require.Nil(t, err)
}

func TestQuerySupply(t *testing.T) {
	app, ctx := createTestApp(false)
	keeper := *app.SupplyKeeper()
	cdc := app.Codec()

	supplyCoins := chainType.NewCoins(
		chainType.NewCoin(constants.DefaultBondDenom, sdk.NewInt(100)),
		chainType.NewCoin(constants.ChainMainNameStr+"/"+"photon", sdk.NewInt(50)),
		chainType.NewCoin(constants.ChainMainNameStr+"/"+"atom", sdk.NewInt(2000)),
		chainType.NewCoin(constants.ChainMainNameStr+"/"+"btc", sdk.NewInt(21000000)),
	)

	supplyAcc := keeper.GetModuleAccount(ctx, types.ModuleName).GetID()
	fInitCoins(t, ctx, *app.AssetKeeper(), supplyCoins, supplyAcc)

	query := abci.RequestQuery{
		Path: "",
		Data: []byte{},
	}
	querier := keep.NewQuerier(keeper)

	queryTotalSupplyParams := types.NewQueryTotalSupplyParams(1, 10)
	bz, errRes := cdc.MarshalJSON(queryTotalSupplyParams)
	require.Nil(t, errRes)

	query.Path = fmt.Sprintf("/custom/supply/%s", types.QueryTotalSupply)
	query.Data = bz

	res, err := querier(ctx, []string{types.QueryTotalSupply}, query)
	require.Nil(t, err)

	var totalCoins chainType.Coins
	errRes = cdc.UnmarshalJSON(res, &totalCoins)
	require.Nil(t, errRes)
	require.Equal(t, supplyCoins, totalCoins)

	querySupplyParams := types.NewQuerySupplyOfParams(constants.DefaultBondDenom)
	bz, errRes = cdc.MarshalJSON(querySupplyParams)
	require.Nil(t, errRes)

	query.Path = fmt.Sprintf("/custom/supply/%s", types.QuerySupplyOf)
	query.Data = bz

	res, err = querier(ctx, []string{types.QuerySupplyOf}, query)
	require.Nil(t, err)

	var supply sdk.Int
	errRes = supply.UnmarshalJSON(res)
	require.Nil(t, errRes)
	require.True(sdk.IntEq(t, sdk.NewInt(100), supply))
}
Loader.js
import React from "react"; import styled from "@emotion/styled"; import { BeatLoader } from "react-spinners"; const LoaderEl = styled.div` height: 100vh; display: flex; align-items: center; & > div { display: block; width: 100%; text-align: center; } `; export function Loader() { return ( <LoaderEl> <BeatLoader color="#333" /> </LoaderEl>
); }
[hotspotid].js
import { useState, useEffect } from 'react' import { Row, Typography, Checkbox, Tooltip } from 'antd' import { Client, Network } from '@helium/http' import Fade from 'react-reveal/Fade' import Checklist from '../../components/Hotspots/Checklist/Checklist'
import RewardSummary from '../../components/Hotspots/RewardSummary' import Link from 'next/link' import dynamic from 'next/dynamic' import AppLayout, { Content } from '../../components/AppLayout' import AccountIcon from '../../components/AccountIcon' import ActivityList from '../../components/ActivityList' import WitnessesList from '../../components/WitnessesList' import HotspotImg from '../../public/images/hotspot.svg' import NearbyHotspotsList from '../../components/NearbyHotspotsList' import animalHash from 'angry-purple-tiger' import { formatHotspotName, formatLocation, } from '../../components/Hotspots/utils' import sumBy from 'lodash/sumBy' import { fetchNearbyHotspots, getHotspotRewardsBuckets, } from '../../data/hotspots' import Hex from '../../components/Hex' import { generateRewardScaleColor } from '../../components/Hotspots/utils' const HotspotMapbox = dynamic( () => import('../../components/Hotspots/HotspotMapbox'), { ssr: false, loading: () => <div style={{ height: 400, width: '100%' }} />, }, ) const { Title, Text } = Typography const HotspotView = ({ hotspot }) => { const [showWitnesses, setShowWitnesses] = useState(true) const [showNearbyHotspots, setShowNearbyHotspots] = useState(true) const [witnesses, setWitnesses] = useState([]) const [activity, setActivity] = useState({}) const [rewards, setRewards] = useState([]) const [nearbyHotspots, setNearbyHotspots] = useState([]) const [witnessesLoading, setWitnessesLoading] = useState(true) const [activityLoading, setActivityLoading] = useState(true) const [rewardsLoading, setRewardsLoading] = useState(true) const [nearbyHotspotsLoading, setNearbyHotspotsLoading] = useState(true) const [loading, setLoading] = useState(true) useEffect(() => { setLoading( !( !witnessesLoading && !activityLoading && !rewardsLoading && !nearbyHotspotsLoading ), ) }, [witnessesLoading, activityLoading, rewardsLoading, nearbyHotspotsLoading]) useEffect(() => { const client = new Client(Network.testnet) const hotspotid = 
hotspot.address async function getWitnesses() { setWitnessesLoading(true) // // TODO convert to use @helium/http const witnesses = await fetch( `https://testnet-api.helium.wtf/v1/hotspots/${hotspotid}/witnesses`, ) .then((res) => res.json()) .then((json) => json.data.filter((w) => !(w.address === hotspotid))) setWitnesses(witnesses) setWitnessesLoading(false) } async function getNearbyHotspots() { setNearbyHotspotsLoading(true) const hotspots = await fetchNearbyHotspots(hotspot.lat, hotspot.lng, 2000) setNearbyHotspots(hotspots.filter((h) => h.address !== hotspotid)) setNearbyHotspotsLoading(false) } async function getHotspotActivity() { setActivityLoading(true) // Get most recent challenger transaction const challengerTxnList = await client.hotspot(hotspotid).activity.list({ filterTypes: ['poc_request_v1'], }) const challengerTxn = await challengerTxnList.take(1) // Get most recent challengee transaction const challengeeTxnList = await client.hotspot(hotspotid).activity.list({ filterTypes: ['poc_receipts_v1', 'poc_receipts_v2'], }) const challengeeTxn = await challengeeTxnList.take(1) // Get most recent rewards transactions to search for... const rewardTxnsList = await client.hotspot(hotspotid).activity.list({ filterTypes: ['rewards_v1'], }) const rewardTxns = await rewardTxnsList.take(200) let witnessTxn = null // most recent witness transaction rewardTxns.some(function (txn) { return txn.rewards.some(function (txnReward) { if (txnReward.type === 'poc_witnesses') { witnessTxn = txn return } }) }) let dataTransferTxn = null // most recent data credit transaction rewardTxns.some(function (txn) { return txn.rewards.some(function (txnReward) { if (txnReward.type === 'data_credits') { dataTransferTxn = txn return } }) }) const hotspotActivity = { challengerTxn: challengerTxn.length === 1 ? challengerTxn[0] : null, challengeeTxn: challengeeTxn.length === 1 ? 
challengeeTxn[0] : null, witnessTxn: witnessTxn, dataTransferTxn: dataTransferTxn, } setActivity(hotspotActivity) setActivityLoading(false) } async function getHotspotRewards() { setRewardsLoading(true) const sixtyDays = await getHotspotRewardsBuckets(hotspotid, 60, 'day') const fourtyEightHours = await getHotspotRewardsBuckets( hotspotid, 48, 'hour', ) const oneYear = await getHotspotRewardsBuckets(hotspotid, 365, 'day') setRewards({ buckets: { days: sixtyDays, hours: fourtyEightHours, year: oneYear }, day: sumBy(sixtyDays.slice(0, 1), 'total'), previousDay: sumBy(sixtyDays.slice(1, 2), 'total'), week: sumBy(sixtyDays.slice(0, 7), 'total'), previousWeek: sumBy(sixtyDays.slice(7, 14), 'total'), month: sumBy(sixtyDays.slice(0, 30), 'total'), previousMonth: sumBy(sixtyDays.slice(30, 60), 'total'), oneYear: sumBy(oneYear, 'total'), }) setRewardsLoading(false) } getWitnesses() getNearbyHotspots() getHotspotActivity() getHotspotRewards() }, [hotspot.address]) return ( <AppLayout title={`${animalHash(hotspot.address)} | Hotspot `} description={`A Helium Hotspot ${ hotspot.location ? 
`located in ${formatLocation(hotspot?.geocode)}` : `with no location asserted` }, belonging to account ${hotspot.owner}`} openGraphImageAbsoluteUrl={`https://explorer.helium.com/images/og/hotspots.png`} url={`https://explorer.helium.com/hotspots/${hotspot.address}`} > <Content style={{ marginTop: 0, background: '#222e46', padding: '0px 0 0px', }} > <div style={{ margin: '0 auto', maxWidth: 850 + 40 }} className="content-container-hotspot-view" > <HotspotMapbox hotspot={hotspot} witnesses={witnesses} showWitnesses={showWitnesses} nearbyHotspots={nearbyHotspots} showNearbyHotspots={showNearbyHotspots} /> {hotspot.lng !== undefined && hotspot.lat !== undefined && ( <div style={{ display: 'flex', justifyContent: 'space-between', paddingTop: 10, color: 'white', width: '100%', }} > <p style={{ marginBottom: '-20px', fontWeight: 600 }}> {formatLocation(hotspot?.geocode)} </p> <div> <Checkbox onChange={(e) => setShowNearbyHotspots(e.target.checked)} checked={showNearbyHotspots} style={{ color: 'white' }} > Show nearby hotspots </Checkbox> <Checkbox onChange={(e) => setShowWitnesses(e.target.checked)} checked={showWitnesses} style={{ color: 'white' }} > Show witnesses </Checkbox> </div> </div> )} <Row style={{ paddingTop: 30 }}> <div className="flexwrapper" style={{ width: '100%', justifyContent: 'flex-start', alignItems: 'flex-start', // marginBottom: 50, paddingRight: 20, }} > <div style={{ width: '100%' }}> <Fade delay={500}> <div style={{ display: 'flex', flexDirection: 'row', alignItems: 'center', justifyContent: 'flex-start', padding: '0 0 8px 0', width: 'auto', }} > <div style={{ display: 'flex', flexDirection: 'row', alignItems: 'center', justifyContent: 'center', padding: '2px 10px', backgroundColor: '#182035', borderRadius: '20px', }} > <Tooltip placement="top" title={`Hotspot is ${hotspot.status.online}`} > <div style={{ height: 10, minWidth: 10, width: 10, // marginLeft: 15, backgroundColor: hotspot.status.online === 'online' ? 
'#32C48D' : '#fb6666', borderRadius: 20, }} ></div> </Tooltip> <Tooltip placement="top" title={`${ hotspot.status.online === 'online' && hotspot.status.height === null ? 'Beginning to sync' : hotspot.status.online === 'online' && hotspot.status.height !== null ? `Syncing block ${hotspot.status?.height.toLocaleString()}. ` : 'Hotspot is not syncing. ' }${ hotspot.status.online === 'online' && hotspot.status.height !== null ? `Blocks remaining: ${( hotspot.block - hotspot.status?.height ).toLocaleString()}.` : `` }`} > <p style={{ marginBottom: 0, color: '#8283B2', marginLeft: 8, }} > {hotspot.status.online === 'offline' ? `Offline` : hotspot.block - hotspot.status?.height >= 500 || hotspot.status.height === null ? `Syncing` : `Synced`} </p> </Tooltip> </div> {hotspot.rewardScale && ( <div style={{ display: 'flex', flexDirection: 'row', marginLeft: '10px', alignItems: 'center', justifyContent: 'center', padding: '2px 10px', backgroundColor: '#182035', borderRadius: '20px', }} > <Tooltip placement="top" title={`Reward scale: ${hotspot.rewardScale}`} > <span style={{ display: 'flex', alignItems: 'center', justifyContent: 'center', }} > <Hex width={10.5} height={12} fillColor={generateRewardScaleColor( hotspot.rewardScale, )} /> </span> </Tooltip> <Tooltip placement="top" title={`A Hotspot's own reward scale does not impact its earnings. 
Hotspots witnessing this Hotspot will see their rewards scaled up or down according to this Hotspot's reward scale.`} > <p style={{ marginBottom: 0, color: '#8283B2', marginLeft: 8, }} > {hotspot.rewardScale.toLocaleString(undefined, { minimumFractionDigits: 2, maximumFractionDigits: 2, })} </p> </Tooltip> </div> )} </div> </Fade> <span className="hotspot-name"> <Title style={{ color: 'white', fontSize: 52, marginTop: 10, letterSpacing: '-2px', marginBottom: 17, }} > {formatHotspotName(hotspot.name)} </Title> </span> <Tooltip placement="bottom" title="Hotspot Network Address"> <img src={HotspotImg} style={{ height: 15, marginRight: 5, position: 'relative', top: '-2px', }} alt="Hotspot Network Address" /> <Text copyable style={{ color: '#8283B2', wordBreak: 'break-all', }} > {hotspot.address} </Text> </Tooltip> </div> </div> </Row> </div> <div style={{ maxWidth: 850 + 40, margin: '0 auto', paddingBottom: 50, marginTop: 40, }} > <Checklist hotspot={hotspot} witnesses={witnesses} activity={activity} loading={loading} rewardsLoading={rewardsLoading} witnessesLoading={witnessesLoading} activityLoading={activityLoading} /> </div> <div style={{ width: '100%', backgroundColor: 'rgb(24, 32, 53)', padding: '20px', textAlign: 'center', }} > <Content style={{ maxWidth: 850, margin: '0 auto' }}> <p style={{ color: 'white', margin: 0, display: 'flex', justifyContent: 'center', }} > Owned by: <br className="line-break-only-at-small" /> <span style={{ width: 21, marginLeft: 8, marginRight: 2 }}> <AccountIcon address={hotspot.owner} size={18} /> </span> <Link href={'/accounts/' + hotspot.owner}> <a style={{ wordBreak: 'break-all' }}>{hotspot.owner}</a> </Link> </p> </Content> </div> </Content> <Content style={{ margin: '0 auto', maxWidth: 850, paddingBottom: 20, marginTop: 0, }} > <RewardSummary rewardsLoading={rewardsLoading} rewards={rewards} /> </Content> <Content style={{ margin: '0 auto', maxWidth: 850, paddingBottom: 20, marginTop: 0, }} > <WitnessesList 
          witnessesLoading={witnessesLoading}
          witnesses={witnesses}
        />
      </Content>
      <Content
        style={{
          margin: '0 auto',
          maxWidth: 850,
          paddingBottom: 20,
          marginTop: 0,
        }}
      >
        <NearbyHotspotsList
          nearbyHotspotsLoading={nearbyHotspotsLoading}
          nearbyHotspots={nearbyHotspots}
        />
      </Content>
      <Content
        style={{
          // NOTE(review): the `margin` shorthand below resets this
          // `marginTop` — confirm which spacing is intended.
          marginTop: '20px',
          margin: '0 auto',
          maxWidth: 850,
          paddingBottom: 100,
        }}
      >
        <ActivityList type="hotspot" address={hotspot.address} />
      </Content>
    </AppLayout>
  )
}

// Build-time: pre-render no hotspot pages; each page is generated on its
// first request ('blocking' fallback) and cached afterwards.
export async function getStaticPaths() {
  return {
    paths: [],
    fallback: 'blocking',
  }
}

// Fetch the hotspot record for the requested address from the Helium
// testnet and hand it to the page component.  JSON round-trip strips
// non-serializable fields so Next.js can serialize the props; pages are
// revalidated at most every 10 seconds.
export async function getStaticProps({ params }) {
  const client = new Client(Network.testnet)
  const { hotspotid } = params
  const hotspot = await client.hotspots.get(hotspotid)

  return {
    props: {
      hotspot: JSON.parse(JSON.stringify(hotspot)),
    },
    revalidate: 10,
  }
}

export default HotspotView
user.module.ts
import { NgModule } from '@angular/core'; import { ProcessModule } from '../process/process.module'; import { UserService } from './facade/index'; import { UserStoreModule } from './store/user-store.module'; @NgModule({
}) export class UserModule {}
imports: [UserStoreModule, ProcessModule], providers: [UserService],
genres.rs
use super::spotify_api::{ endpoints::{ARTISTS_INFO, SAVED_TRACKS}, models::{ArtistsResponse, SavedTrack, Track}, }; use super::CmdHandler; use console::style; use dialoguer::Checkboxes; use indicatif::{ProgressBar, ProgressStyle}; use itertools::Itertools; use std::cmp::min; use std::collections::HashMap; use std::convert::TryInto; use std::error::Error; impl CmdHandler { pub fn genres(&self) -> Result<(), Box<dyn Error>> { println!("Loading your library information..."); let saved_tracks = self.paged_request::<SavedTrack>(SAVED_TRACKS)?; println!("Library loaded."); println!("Getting genre information..."); let mut artist_map = HashMap::new(); for saved_track in &saved_tracks { let track = &saved_track.track; artist_map .entry(&track.artists[0].id) .and_modify(|artist_tracks: &mut Vec<&Track>| { artist_tracks.push(track); }) .or_insert_with(|| vec![track]); } let artists = artist_map.keys().collect::<Vec<_>>(); let chunks = artists.chunks(50); let mut genre_map: HashMap<String, Vec<(String, String)>> = HashMap::new(); let progress = ProgressBar::new(artists.len().try_into().unwrap()).with_style( ProgressStyle::default_bar() .template("[{wide_bar}] {pos}/{len}") .progress_chars("=> "), ); for chunk in chunks { let data = self
.json::<ArtistsResponse>()?; progress.inc(50); for artist in &data.artists { for genre in &artist.genres { genre_map .entry(String::from(genre)) .and_modify(|artists: &mut Vec<(String, String)>| { artists.push((String::from(&artist.id), String::from(&artist.name))); }) .or_insert_with(|| { vec![(String::from(&artist.id), String::from(&artist.name))] }); } genre_map.insert( String::from(&artist.name), vec![(String::from(&artist.id), String::from(&artist.name))], ); } } progress.finish_and_clear(); println!("Genre information loaded."); let genres = genre_map .into_iter() .map(|(genre, artists)| { ( genre, ( artists .iter() .map(|artist| artist_map.get(&artist.0).unwrap()) .flatten() .collect::<Vec<_>>(), artists .into_iter() .map(|artist| artist.1) .collect::<Vec<_>>(), ), ) }) .sorted_by( |(_id1, (tracks1, _artists1)), (_id2, (tracks2, _artists2))| { tracks2.len().cmp(&tracks1.len()) }, ) .collect::<Vec<_>>(); let checkboxes = { let mut checkboxes = Checkboxes::new(); checkboxes.with_prompt( &style("Select genres to create your playlist from") .cyan() .to_string(), ); checkboxes.items( &genres .iter() .map(|(genre, (tracks, artists))| { format!( "{} - {} songs ({})", &genre, tracks.len(), &artists[..min(artists.len(), 5)].join(", ") ) }) .collect::<Vec<String>>() .iter() .map(|s| s.as_ref()) .collect::<Vec<&str>>()[..], ); checkboxes.paged(true); checkboxes }; let selection = checkboxes.interact()?; if selection.is_empty() { println!("No genres selected."); } else { let default_name = selection .iter() .map(|i| &genres.get(*i).unwrap().0) .join("/"); let tracks = selection .into_iter() .map(|i| &(genres.get(i).unwrap().1).0) .flatten() .map(|track| &track.uri) .unique() .collect::<Vec<_>>(); self.create_playlist(tracks, &default_name)?; } Ok(()) } }
.client .get(&format!("{}?ids={}", ARTISTS_INFO, chunk.iter().join(","))) .send()? .error_for_status()?
flags.rs
use crate::ty::subst::{GenericArg, GenericArgKind}; use crate::ty::{self, InferConst, Ty, TypeFlags}; use std::slice; #[derive(Debug)] pub struct FlagComputation { pub flags: TypeFlags, // see `TyS::outer_exclusive_binder` for details pub outer_exclusive_binder: ty::DebruijnIndex, } impl FlagComputation { fn new() -> FlagComputation { FlagComputation { flags: TypeFlags::empty(), outer_exclusive_binder: ty::INNERMOST } } #[allow(rustc::usage_of_ty_tykind)] pub fn
(kind: &ty::TyKind<'_>) -> FlagComputation { let mut result = FlagComputation::new(); result.add_kind(kind); result } pub fn for_predicate(kind: &ty::PredicateKind<'_>) -> FlagComputation { let mut result = FlagComputation::new(); result.add_predicate_kind(kind); result } pub fn for_const(c: &ty::Const<'_>) -> TypeFlags { let mut result = FlagComputation::new(); result.add_const(c); result.flags } fn add_flags(&mut self, flags: TypeFlags) { self.flags = self.flags | flags; } /// indicates that `self` refers to something at binding level `binder` fn add_bound_var(&mut self, binder: ty::DebruijnIndex) { let exclusive_binder = binder.shifted_in(1); self.add_exclusive_binder(exclusive_binder); } /// indicates that `self` refers to something *inside* binding /// level `binder` -- not bound by `binder`, but bound by the next /// binder internal to it fn add_exclusive_binder(&mut self, exclusive_binder: ty::DebruijnIndex) { self.outer_exclusive_binder = self.outer_exclusive_binder.max(exclusive_binder); } /// Adds the flags/depth from a set of types that appear within the current type, but within a /// region binder. fn add_bound_computation(&mut self, computation: FlagComputation) { self.add_flags(computation.flags); // The types that contributed to `computation` occurred within // a region binder, so subtract one from the region depth // within when adding the depth to `self`. let outer_exclusive_binder = computation.outer_exclusive_binder; if outer_exclusive_binder > ty::INNERMOST { self.add_exclusive_binder(outer_exclusive_binder.shifted_out(1)); } // otherwise, this binder captures nothing } #[allow(rustc::usage_of_ty_tykind)] fn add_kind(&mut self, kind: &ty::TyKind<'_>) { match kind { &ty::Bool | &ty::Char | &ty::Int(_) | &ty::Float(_) | &ty::Uint(_) | &ty::Never | &ty::Str | &ty::Foreign(..) 
=> {} &ty::Error(_) => self.add_flags(TypeFlags::HAS_ERROR), &ty::Param(_) => { self.add_flags(TypeFlags::HAS_TY_PARAM); self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE); } &ty::Generator(_, ref substs, _) => { let substs = substs.as_generator(); let should_remove_further_specializable = !self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE); self.add_substs(substs.parent_substs()); if should_remove_further_specializable { self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE; } self.add_ty(substs.resume_ty()); self.add_ty(substs.return_ty()); self.add_ty(substs.witness()); self.add_ty(substs.yield_ty()); self.add_ty(substs.tupled_upvars_ty()); } &ty::GeneratorWitness(ts) => { let mut computation = FlagComputation::new(); computation.add_tys(ts.skip_binder()); self.add_bound_computation(computation); } &ty::Closure(_, substs) => { let substs = substs.as_closure(); let should_remove_further_specializable = !self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE); self.add_substs(substs.parent_substs()); if should_remove_further_specializable { self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE; } self.add_ty(substs.sig_as_fn_ptr_ty()); self.add_ty(substs.kind_ty()); self.add_ty(substs.tupled_upvars_ty()); } &ty::Bound(debruijn, _) => { self.add_bound_var(debruijn); } &ty::Placeholder(..) 
=> { self.add_flags(TypeFlags::HAS_TY_PLACEHOLDER); self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE); } &ty::Infer(infer) => { self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE); match infer { ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_) => {} ty::TyVar(_) | ty::IntVar(_) | ty::FloatVar(_) => { self.add_flags(TypeFlags::HAS_TY_INFER) } } } &ty::Adt(_, substs) => { self.add_substs(substs); } &ty::Projection(data) => { self.add_flags(TypeFlags::HAS_TY_PROJECTION); self.add_projection_ty(data); } &ty::Opaque(_, substs) => { self.add_flags(TypeFlags::HAS_TY_OPAQUE); self.add_substs(substs); } &ty::Dynamic(ref obj, r) => { let mut computation = FlagComputation::new(); for predicate in obj.skip_binder().iter() { match predicate { ty::ExistentialPredicate::Trait(tr) => computation.add_substs(tr.substs), ty::ExistentialPredicate::Projection(p) => { let mut proj_computation = FlagComputation::new(); proj_computation.add_existential_projection(&p); self.add_bound_computation(proj_computation); } ty::ExistentialPredicate::AutoTrait(_) => {} } } self.add_bound_computation(computation); self.add_region(r); } &ty::Array(tt, len) => { self.add_ty(tt); self.add_const(len); } &ty::Slice(tt) => self.add_ty(tt), &ty::RawPtr(ref m) => { self.add_ty(m.ty); } &ty::Ref(r, ty, _) => { self.add_region(r); self.add_ty(ty); } &ty::Tuple(ref substs) => { self.add_substs(substs); } &ty::FnDef(_, substs) => { self.add_substs(substs); } &ty::FnPtr(f) => { self.add_fn_sig(f); } } } fn add_predicate_kind(&mut self, kind: &ty::PredicateKind<'_>) { match kind { ty::PredicateKind::ForAll(binder) => { let mut computation = FlagComputation::new(); computation.add_predicate_atom(binder.skip_binder()); self.add_bound_computation(computation); } &ty::PredicateKind::Atom(atom) => self.add_predicate_atom(atom), } } fn add_predicate_atom(&mut self, atom: ty::PredicateAtom<'_>) { match atom { ty::PredicateAtom::Trait(trait_pred, _constness) => { 
self.add_substs(trait_pred.trait_ref.substs); } ty::PredicateAtom::RegionOutlives(ty::OutlivesPredicate(a, b)) => { self.add_region(a); self.add_region(b); } ty::PredicateAtom::TypeOutlives(ty::OutlivesPredicate(ty, region)) => { self.add_ty(ty); self.add_region(region); } ty::PredicateAtom::Subtype(ty::SubtypePredicate { a_is_expected: _, a, b }) => { self.add_ty(a); self.add_ty(b); } ty::PredicateAtom::Projection(ty::ProjectionPredicate { projection_ty, ty }) => { self.add_projection_ty(projection_ty); self.add_ty(ty); } ty::PredicateAtom::WellFormed(arg) => { self.add_substs(slice::from_ref(&arg)); } ty::PredicateAtom::ObjectSafe(_def_id) => {} ty::PredicateAtom::ClosureKind(_def_id, substs, _kind) => { self.add_substs(substs); } ty::PredicateAtom::ConstEvaluatable(_def_id, substs) => { self.add_substs(substs); } ty::PredicateAtom::ConstEquate(expected, found) => { self.add_const(expected); self.add_const(found); } } } fn add_ty(&mut self, ty: Ty<'_>) { self.add_flags(ty.flags()); self.add_exclusive_binder(ty.outer_exclusive_binder); } fn add_tys(&mut self, tys: &[Ty<'_>]) { for &ty in tys { self.add_ty(ty); } } fn add_fn_sig(&mut self, fn_sig: ty::PolyFnSig<'_>) { let mut computation = FlagComputation::new(); computation.add_tys(fn_sig.skip_binder().inputs()); computation.add_ty(fn_sig.skip_binder().output()); self.add_bound_computation(computation); } fn add_region(&mut self, r: ty::Region<'_>) { self.add_flags(r.type_flags()); if let ty::ReLateBound(debruijn, _) = *r { self.add_bound_var(debruijn); } } fn add_const(&mut self, c: &ty::Const<'_>) { self.add_ty(c.ty); match c.val { ty::ConstKind::Unevaluated(_, substs, _) => { self.add_substs(substs); self.add_flags(TypeFlags::HAS_CT_PROJECTION); } ty::ConstKind::Infer(infer) => { self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE); match infer { InferConst::Fresh(_) => {} InferConst::Var(_) => self.add_flags(TypeFlags::HAS_CT_INFER), } } ty::ConstKind::Bound(debruijn, _) => { self.add_bound_var(debruijn); } 
ty::ConstKind::Param(_) => { self.add_flags(TypeFlags::HAS_CT_PARAM); self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE); } ty::ConstKind::Placeholder(_) => { self.add_flags(TypeFlags::HAS_CT_PLACEHOLDER); self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE); } ty::ConstKind::Value(_) => {} ty::ConstKind::Error(_) => self.add_flags(TypeFlags::HAS_ERROR), } } fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection<'_>) { self.add_substs(projection.substs); self.add_ty(projection.ty); } fn add_projection_ty(&mut self, projection_ty: ty::ProjectionTy<'_>) { self.add_substs(projection_ty.substs); } fn add_substs(&mut self, substs: &[GenericArg<'_>]) { for kind in substs { match kind.unpack() { GenericArgKind::Type(ty) => self.add_ty(ty), GenericArgKind::Lifetime(lt) => self.add_region(lt), GenericArgKind::Const(ct) => self.add_const(ct), } } } }
for_kind
backend.rs
use crate::quad; use crate::text; use crate::triangle; use crate::{Settings, Transformation, Viewport}; use iced_graphics::backend; use iced_graphics::font; use iced_graphics::Layer; use iced_graphics::Primitive; use iced_native::mouse; use iced_native::{Font, HorizontalAlignment, Size, VerticalAlignment}; /// A [`glow`] graphics backend for [`iced`]. /// /// [`glow`]: https://github.com/grovesNL/glow /// [`iced`]: https://github.com/hecrj/iced #[derive(Debug)] pub struct Backend { quad_pipeline: quad::Pipeline, text_pipeline: text::Pipeline, triangle_pipeline: triangle::Pipeline, } impl Backend { /// Creates a new [`Backend`]. /// /// [`Backend`]: struct.Backend.html pub fn new(gl: &glow::Context, settings: Settings) -> Self { let text_pipeline = text::Pipeline::new(gl, settings.default_font); let quad_pipeline = quad::Pipeline::new(gl); let triangle_pipeline = triangle::Pipeline::new(gl); Self { quad_pipeline, text_pipeline, triangle_pipeline, } } /// Draws the provided primitives in the default framebuffer. /// /// The text provided as overlay will be rendered on top of the primitives. /// This is useful for rendering debug information. pub fn draw<T: AsRef<str>>( &mut self, gl: &glow::Context, viewport: &Viewport, (primitive, mouse_interaction): &(Primitive, mouse::Interaction), overlay_text: &[T], ) -> mouse::Interaction { let viewport_size = viewport.physical_size(); let scale_factor = viewport.scale_factor() as f32; let projection = viewport.projection(); let mut layers = Layer::generate(primitive, viewport); layers.push(Layer::overlay(overlay_text, viewport)); for layer in layers { self.flush( gl, scale_factor, projection, &layer, viewport_size.height, ); } *mouse_interaction } fn flush( &mut self, gl: &glow::Context, scale_factor: f32, transformation: Transformation, layer: &Layer<'_>, target_height: u32, )
} impl iced_graphics::Backend for Backend { fn trim_measurements(&mut self) { self.text_pipeline.trim_measurement_cache() } } impl backend::Text for Backend { const ICON_FONT: Font = font::ICONS; const CHECKMARK_ICON: char = font::CHECKMARK_ICON; fn measure( &self, contents: &str, size: f32, font: Font, bounds: Size, ) -> (f32, f32) { self.text_pipeline.measure(contents, size, font, bounds) } } #[cfg(feature = "image")] impl backend::Image for Backend { fn dimensions(&self, _handle: &iced_native::image::Handle) -> (u32, u32) { (50, 50) } } #[cfg(feature = "svg")] impl backend::Svg for Backend { fn viewport_dimensions( &self, _handle: &iced_native::svg::Handle, ) -> (u32, u32) { (50, 50) } }
{ let mut bounds = (layer.bounds * scale_factor).round(); bounds.height = bounds.height.min(target_height); if !layer.quads.is_empty() { self.quad_pipeline.draw( gl, target_height, &layer.quads, transformation, scale_factor, bounds, ); } if !layer.meshes.is_empty() { let scaled = transformation * Transformation::scale(scale_factor, scale_factor); self.triangle_pipeline.draw( gl, target_height, scaled, scale_factor, &layer.meshes, ); } if !layer.text.is_empty() { for text in layer.text.iter() { // Target physical coordinates directly to avoid blurry text let text = glow_glyph::Section { // TODO: We `round` here to avoid rerasterizing text when // its position changes slightly. This can make text feel a // bit "jumpy". We may be able to do better once we improve // our text rendering/caching pipeline. screen_position: ( (text.bounds.x * scale_factor).round(), (text.bounds.y * scale_factor).round(), ), // TODO: Fix precision issues with some scale factors. // // The `ceil` here can cause some words to render on the // same line when they should not. // // Ideally, `wgpu_glyph` should be able to compute layout // using logical positions, and then apply the proper // scaling when rendering. This would ensure that both // measuring and rendering follow the same layout rules. 
bounds: ( (text.bounds.width * scale_factor).ceil(), (text.bounds.height * scale_factor).ceil(), ), text: vec![glow_glyph::Text { text: text.content, scale: glow_glyph::ab_glyph::PxScale { x: text.size * scale_factor, y: text.size * scale_factor, }, font_id: self.text_pipeline.find_font(text.font), extra: glow_glyph::Extra { color: text.color, z: 0.0, }, }], layout: glow_glyph::Layout::default() .h_align(match text.horizontal_alignment { HorizontalAlignment::Left => { glow_glyph::HorizontalAlign::Left } HorizontalAlignment::Center => { glow_glyph::HorizontalAlign::Center } HorizontalAlignment::Right => { glow_glyph::HorizontalAlign::Right } }) .v_align(match text.vertical_alignment { VerticalAlignment::Top => { glow_glyph::VerticalAlign::Top } VerticalAlignment::Center => { glow_glyph::VerticalAlign::Center } VerticalAlignment::Bottom => { glow_glyph::VerticalAlign::Bottom } }), ..Default::default() }; self.text_pipeline.queue(text); } self.text_pipeline.draw_queued( gl, transformation, glow_glyph::Region { x: bounds.x, y: target_height - (bounds.y + bounds.height), width: bounds.width, height: bounds.height, }, ); } }
halos.py
import numpy as np from seren3 import config from seren3.halos import Halo, HaloCatalogue import logging logger = logging.getLogger('seren3.halos.halos') class AHFCatalogue(HaloCatalogue): ''' Class to handle catalogues produced by AHF. ''' # assume file structure like this # ID(1) hostHalo(2) numSubStruct(3) Mvir(4) npart(5) Xc(6) # Yc(7) Zc(8) VXc(9) VYc(10) VZc(11) Rvir(12) Rmax(13) # r2(14) mbp_offset(15) com_offset(16) Vmax(17) v_esc(18) # sigV(19) lambda(20) lambdaE(21) Lx(22) Ly(23) Lz(24) # b(25) c(26) Eax(27) Eay(28) Eaz(29) Ebx(30) Eby(31) Ebz(32) Ecx(33) # Ecy(34) Ecz(35) ovdens(36) nbins(37) fMhires(38) Ekin(39) # Epot(40) SurfP(41) Phi0(42) cNFW(43) # n_gas(44) M_gas(45) lambda_gas(46) lambdaE_gas(47) # Lx_gas(48) Ly_gas(49) Lz_gas(50) b_gas(51) # c_gas(52) Eax_gas(53) Eay_gas(54) Eaz_gas(55) # Ebx_gas(56) Eby_gas(57) Ebz_gas(58) Ecx_gas(59) # Ecy_gas(60) Ecz_gas(61) Ekin_gas(62) Epot_gas(63) # n_star(64) M_star(65) lambda_star(66) lambdaE_star(67) # Lx_star(68) Ly_star(69) Lz_star(70) b_star(71) # c_star(72) Eax_star(73) Eay_star(74) Eaz_star(75) # Ebx_star(76) Eby_star(77) Ebz_star(78) Ecx_star(79) # Ecy_star(80) Ecz_star(81) Ekin_star(82) Epot_star(83) halo_type = np.dtype([('id', np.int64), ('hosthalo', np.int64), ('numsubstruct', np.int64), ('mvir', 'f'), ('num_p', np.int64), ('pos', 'f', 3), ('vel', 'f', 3), ('rvir', 'f'), ('rmax', 'f'), ('r2', 'f'), ('mpb_offset', 'f'), ('com_offset', 'f'), ('v_max', 'f'), ('v_esc', 'f'), ('sigv', 'f'), ('bullock_spin', 'f'), ('spin', 'f'), ('l', 'f', 3), ('b', 'f'), ('c', 'f'), ('ea', 'f', 3), ('eb', 'f', 3), ('ec', 'f', 3), ('ovdens', 'f'), ('nbins', np.int64), ('fmhires', 'f'), ('ekin', 'f'), ('epot', 'f'), ('surfp', 'f'), ('phiO', 'f'), ('cnfw', 'f'), ('n_gas', np.int64), ('m_gas', 'f'), ('bullock_spin_gas', 'f'), ('spin_gas', 'f'), ('l_gas', 'f', 3), ('b_gas', 'f'), ('c_gas', 'f'), ('ea_gas', 'f', 3), ('eb_gas', 'f', 3), ('ec_gas', 'f', 3,), ('ekin_gas', 'f'), ('epot_gas', 'f'), ('n_star', np.int64), 
('m_star', 'f'), ('bullock_spin_star', 'f'), ('spin_star', 'f'), ('l_star', 'f', 3), ('b_star', 'f'), ('c_star', 'f'), ('ea_star', 'f', 3), ('eb_star', 'f', 3), ('ec_star', 'f', 3,), ('ekin_star', 'f'), ('epot_star', 'f')]) units = {'mvir': 'Msol h**-1', 'pos': 'kpc a h**-1', 'vel': 'km s**-1', 'rvir': 'kpc a h**-1', 'rmax': 'kpc a h**-1', 'r2': 'kpc a h**-1', 'mpb_offset': 'kpc a h**-1', 'com_offset': 'kpc a h**-1', 'v_max': 'km s**-1', 'v_esc': 'km s**-1', 'sigv': 'km s**-1', 'b_to_a': 'kpc a h**-1', 'c_to_a': 'kpc a h**-1', 'ekin': 'Msol h**-1 (km s**-1ec)**2', 'epot': 'Msol h**-1 (km s**-1ec)**2', 'surfp': 'Msol h**-1 (km s**-1ec)**2', 'phiO': '(km s**-1ec)**2', 'm_gas': 'Msol h**-1', 'b_to_a_gas': 'kpc a h**-1', 'c_to_a_gas': 'kpc a h**-1', 'ekin_gas': 'Msol h**-1 (km s**-1ec)**2', 'epot_gas': 'Msol h**-1 (km s**-1ec)**2', 'm_star': 'Msol h**-1', 'b_to_a_star': 'kpc a h**-1', 'c_to_a_star': 'kpc a h**-1', 'ekin_star': 'Msol h**-1 (km s**-1ec)**2', 'epot_star': 'Msol h**-1 (km s**-1ec)**2', } def __init__(self, pymses_snapshot, filename=None, **kwargs): super(AHFCatalogue, self).__init__( pymses_snapshot, "AHF", filename=filename, **kwargs) ################## IMPLEMENT ABSTRACT FUNCTIONS ################## def gadget_format_exists(self): ''' Checks if ramses2gadget has been ran ''' import glob path = "%s/output_%05d/" % (self.base.path, self.base.ioutput) return len(glob.glob("%s/ramses2gadget*" % path)) > 0 def run(self, **kwargs): ''' Run ramses2gadget then AHF ''' import subprocess, os from seren3.utils.sge import ncpu from seren3.utils import which r2g = which("ramses2gadget") ahf = which("AHF-v1.0-084") tasks = [] # Write the config path = "%s/AHF/%03d/" % (self.base.path, self.base.ioutput) if os.path.isfile("%s/ahf.input" % path) is False: if os.path.isfile("%s/ahf.input" % path) is False: if self.write_cfg(**kwargs): print "AHFCatalogue wrote a partial(!) config file." 
else: raise Exception("AHFCatalogue unable to write config file!") # Check if GADGET data exists print 'GADGET format exists: ', self.gadget_format_exists() if self.gadget_format_exists() is False: r2g_mode = kwargs.pop("r2g_mode", "g") # default to sim with gas # Build the ramses2gadget input_dir r2g_input_dir = "%s/output_%05d/" % (self.base.path, self.base.ioutput) # Build exe string r2g_exe = "{EXE} -{MODE} {INPUT_DIR} | tee {INPUT_DIR}/r2g.log".format(EXE=r2g, MODE=r2g_mode, INPUT_DIR=r2g_input_dir) tasks.append(r2g_exe) # Repeat for AHF ahf_path = "%s/AHF/%03d/" % (self.base.path, self.base.ioutput) ahf_input_fname = "%s/ahf.input" % ahf_path if os.path.isdir("%s/halos" % ahf_path) is False: os.mkdir("%s/halos" % ahf_path) ahf_exe = "{EXE} {FNAME}".format(EXE=ahf, FNAME=ahf_input_fname) tasks.append(ahf_exe) # Run the tasks NSLOTS = kwargs.get("NSLOTS", int(ncpu() / 4.)) for task in tasks: mpi_task = "mpirun -np {NSLOTS} {EXE}".format(NSLOTS=NSLOTS, EXE=task) print mpi_task subprocess.check_output(mpi_task, shell=True) subprocess.check_output("cat {AHF_PATH}/halos/*_halos > {AHF_PATH}/halos/all_halos".format(AHF_PATH=ahf_path), shell=True) super(AHFCatalogue, self).__init__(self.base, "AHF", filename=None, **kwargs) return True @property def ahf_path(self): return "%s/%03d/halos/" % (self.finder_base_dir, self.base.ioutput) def get_boxsize(self, **kwargs): ''' Returns the boxsize, according to AHF, in Mpc a h**-1 ''' import glob list_files = glob.glob("%s/*.log" % self.ahf_path) with open(list_files[0], 'r') as f: while True: l = f.readline() if l.startswith('simu.boxsize'): box_size = float(l.split(':')[1]) return self.base.array(box_size, "Mpc a h**-1") # def can_load(self, **kwargs): # ''' # Check if hlist files exist # ''' # import os # if os.path.isfile('%s/all_halos' % self.ahf_path) is False: # path = "%s/AHF/%03d/" % (self.base.path, self.base.ioutput) # if os.path.isfile("%s/ahf.input" % path) is False: # if self.write_cfg(**kwargs): # print 
"AHFCatalogue wrote a partial(!) config file." # else: # raise Exception("AHFCatalogue unable to write config file!") # else: # print "AHFCatalogue not found - ahf.input already written!" # return False # return True def can_load(self, **kwargs): import os if os.path.isfile("%s/all_halos" % self.ahf_path): return True, "exists" else: return False, "Cannot locate all_halos file" def get_filename(self, **kwargs): return "%s/all_halos" % self.ahf_path def load(self, within_r=None, center=np.array([0.5, 0.5, 0.5]), **kwargs): # Ensures file is closed at the end. If within_r is specified, it must be in code units with open(self.filename, 'r') as f: haloprops = np.loadtxt(f, dtype=self.halo_type, comments="#") if within_r: d = np.array([np.sqrt( (center[0] - (h['pos'][0]/self.boxsize/1.e3))**2 + \ (center[1] - (h['pos'][1]/self.boxsize/1.e3))**2 + \ (center[2] - (h['pos'][2]/self.boxsize/1.e3))**2 ) for h in haloprops]) idx = np.where(d <= within_r) haloprops = haloprops[idx] self._nhalos = len(haloprops) self._haloprops = haloprops #for h in xrange(self._nhalos): # self._halos[h] = Halo(self._haloprops[h]['id'], self, self._haloprops[h]) def _get_halo(self, item): haloprops = self._haloprops[item] return Halo(haloprops, self.base, self.units, self.get_boxsize()) def write_cfg(self, **kwargs): ''' Internal function to write an appropriate AHF input file ''' import os path = "%s/AHF/%03d/" % (self.base.path, self.base.ioutput) if os.path.isdir(path) is False: if os.path.isdir("%s/AHF/" % self.base.path) is False: os.mkdir("%s/AHF/" % self.base.path) os.mkdir(path) with open("%s/ahf.input" % path, "w") as f: f.write("[AHF]\n") f.write("ic_filename = %s/output_%05d/ramses2gadget_%03d.\n" % (self.base.path, self.base.ioutput, self.base.ioutput)) f.write("ic_filetype = 61\n") # GADGET f.write("outfile_prefix = %s/AHF/%03d/halos/ahf_\n" % (self.base.path, self.base.ioutput)) LgridDomain = kwargs.pop("LgridDomain", 128) LgridMax = kwargs.pop("LgridMax", 16777216) NperDomCell = 
kwargs.pop("NperDomCell", 5.0) NperRefCell = kwargs.pop("NperRefCell", 5.0) VescTune = kwargs.pop("VescTune", 1.5) NminPerHalo = kwargs.pop("NminPerHalo", 20) RhoVir = kwargs.pop("RhoVir", 0) Dvir = kwargs.pop("Dvir", 200) MaxGatherRad = kwargs.pop("MaxGatherRad", 3.0) LevelDomainDecomp = kwargs.pop("LevelDomainDecomp", 6) NcpuReading = kwargs.pop("NcpuReading", 1) GADGET_LUNIT = kwargs.pop("GADGET_LUNIT", 1e-3) GADGET_MUNIT = kwargs.pop("GADGET_MUNIT", 1e10) f.write("LgridDomain = %d\n" % LgridDomain) f.write("LgridMax = %d\n" % LgridMax) f.write("NperDomCell = %f\n" % NperDomCell) f.write("NperRefCell = %f\n" % NperRefCell) f.write("VescTune = %f\n" % VescTune) f.write("NminPerHalo = %d\n" % NminPerHalo) f.write("RhoVir = %f\n" % RhoVir) f.write("Dvir = %f\n" % Dvir) f.write("MaxGatherRad = %f\n" % MaxGatherRad) f.write("LevelDomainDecomp = %d\n" % LevelDomainDecomp) f.write("NcpuReading = %d\n" % NcpuReading) f.write("[GADGET]\n") f.write("GADGET_LUNIT = %e\n" % GADGET_LUNIT) f.write("GADGET_MUNIT = %e\n" % GADGET_MUNIT) # Any params we missed # for key in kwargs.keys(): # f.write("%s = %s\n" % (key, kwargs[key])) logger.info( "%sCatalogue wrote a partial(!) config file. 
Exiting" % self.finder) return True class RockstarCatalogue(HaloCatalogue): ''' Class to handle catalogues produced by Rockstar Reads the out.list files ''' # halo_type = np.dtype( [('id', np.int64), ('descid', np.int64), \ # ('mvir', 'f'), ('vmax', 'f'), \ # ('vrms', 'f'), ('rvir', 'f'), \ # ('rs', 'f'), ('np', 'f'), \ # ('pos', 'f', 3), ('vel', 'f', 3), \ # ('J', 'f', 3), ('spin', 'f'), \ # ('rs_klypin', 'f'), ('mvir_all', 'f'), \ # ('m200b', 'f'), ('m200c', 'f'), \ # ('m500c', 'f'), ('m2500c', 'f'), \ # ('r200b', 'f'), ('r200c', 'f'), \ # ('r500c', 'f'), ('r2500c', 'f'), \ # ('xoff', 'f'), ('voff', 'f'), \ # ('spin_bullock', 'f'), ('b_to_a', 'f'), \ # ('c_to_a', 'f'), ('A', 'f', 3), \ # ('b_to_a_500c', 'f'), ('c_to_a_500c', 'f'), \ # ('A500c', 'f', 3), ('T/U', 'f'), \ # ('m_pe_behroozi', 'f'), ('M_pe_Diemer', 'f'), \ # ('halfmass_radius', 'f')] ) halo_type = np.dtype( [('id', np.int64), ('descid', np.int64), \ ('mvir', 'f'), ('vmax', 'f'), \ ('vrms', 'f'), ('rvir', 'f'), \ ('rs', 'f'), ('np', 'f'), \ ('pos', 'f', 3), ('vel', 'f', 3), \ ('J', 'f', 3), ('spin', 'f'), \ ('rs_klypin', 'f'), ('mvir_all', 'f'), \ ('m200b', 'f'), ('m200c', 'f'), \ ('m500c', 'f'), ('m2500c', 'f'), \ ('xoff', 'f'), ('voff', 'f'), \ ('spin_bullock', 'f'), ('b_to_a', 'f'), \ ('c_to_a', 'f'), ('A', 'f', 3), \ ('b_to_a_500c', 'f'), ('c_to_a_500c', 'f'), \ ('A500c', 'f', 3), ('T/U', 'f'), \ ('m_pe_behroozi', 'f'), ('M_pe_Diemer', 'f'), \ ('halfmass_radius', 'f')] ) units = {'sam_mvir': 'Msol h**-1', 'mvir': 'Msol h**-1', 'rvir': 'kpc a h**-1', 'rs': 'kpc a h**-1', 'vrms': 'km s**-1', 'vmax': 'km s**-1', 'pos': 'Mpc a h**-1', 'vel': 'km s**-1', 'J': 'Msol h**-1 Mpc h**-1 km s**-1', 'mvir_all': 'Msol h**-1', 'm200b': 'Msol h**-1', 'm200c': 'Msol h**-1', 'm500c': 'Msol h**-1', 'm2500c': 'Msol h**-1', 'm_alt': 'Msol h**-1', #'r_alt': 'kpc a h**-1', 'xoff': 'kpc a h**-1', 'voff': 'km s**-1', 'A': 'kpc a h**-1', 'halfmass_r': 'kpc a h**-1', 'macc': 'Msol h**-1', 'mpeak': 'Msol h**-1', 'vacc': 'km 
s**-1', 'vpeak': 'km s**-1', 'acc_rate_inst': 'Msol h**-1 yr**-1', 'acc_rate_100myr': 'Msol h**-1 100Myr**-1', 'first_acc_mvir': 'Msol h**-1', 'first_acc_vmax': 'km s**-1', 'vmax_at_mpeak': 'km s**-1'} def __init__(self, pymses_snapshot, **kwargs): super(RockstarCatalogue, self).__init__(pymses_snapshot, "Rockstar", **kwargs) def can_load(self, **kwargs): import os # return os.path.isdir("%s/%s/" % (self.base.path, config.get("halo", "rockstar_base"))) and os.path.isfile(self.get_rockstar_info_fname()) if os.path.isdir(self.finder_base_dir): if os.path.isfile(self.get_rockstar_info_fname()): return True, "exists" else: return False, "Cannot locate info file" else: return False, "rockstar directory doesn't exist" def get_rockstar_info_fname(self): return "%s/info_rockstar.txt" % self.finder_base_dir def get_filename(self, **kwargs): ''' Returns the rockstar catalogue filename ''' rockstar_info_fname = self.get_rockstar_info_fname() base_aexp = 1./(1. + self.base.z) if kwargs.get("strict_so", False): # Used for accurate comparissons of halo mass-function. # Uses strict spherical-overdensities for mass calculation, instead # of FOF group. self.finder_base_dir = "%s/rockstar_strict_so_mass/" % self.base.path out_num = [] aexp = [] with open(rockstar_info_fname, "r") as f: for line in f: split_line = line.split('\t') out_num.append( int(split_line[0]) ) aexp.append( float(split_line[1]) ) aexp = np.array(aexp) idx_closest = (np.abs(aexp - base_aexp)).argmin() out_fname = "out_%i.list" % (out_num[idx_closest]) #print 'RockstarCatalogue: matched to %s' % out_fname fname = "%s/%s" % (self.finder_base_dir, out_fname) return fname def get_boxsize(self, **kwargs): ''' Returns boxsize according to rockstar ''' import re with open(self.filename, 'r') as f: for line in f: if line.startswith('#Box size:'): boxsize = re.findall("\d+\.\d+", line)[0] return self.base.array(float(boxsize), "Mpc a h**-1") # Mpc a h**-1 def load(self, **kwargs): # Ensures file is closed at the end. 
If within_r is specified, it must be in code units with open(self.filename, 'r') as f: haloprops = np.loadtxt(f, dtype=self.halo_type, comments="#") self._nhalos = len(haloprops) self._haloprops = haloprops def _get_halo(self, item): haloprops = self._haloprops[item] return Halo(haloprops, self.base, self.units, self.get_boxsize()) class ConsistentTreesCatalogue(HaloCatalogue): halo_type = np.dtype([('aexp', 'f'), ('id', np.int64), ('desc_aexp', 'f'), ('desc_id', 'f'), ('num_prog', np.int64), ('pid', np.int64), ('upid', np.int64), ('desc_pid', np.int64), ('phantom', 'f'), ('sam_mvir', 'f'), ('mvir', 'f'), ('rvir', 'f'), ('rs', 'f'), ('vrms', 'f'), ('mmp', np.int64), # Bool - most massive progenitor ('scale_of_last_mm', 'f'), ('vmax', 'f'), ('pos', 'f', 3), ('vel', 'f', 3), ('J', 'f', 3), ('spin', 'f'), ('breadth_first_id', np.int64), ('depth_first_id', np.int64), ('tree_root_id', np.int64), ('orig_halo_id', np.int64), ('snap_num', np.int64), ('next_coprog_depth_first_id', np.int64), ('last_prog_depth_first_id', np.int64), ('last_mainlead_depth_first_id', np.int64), ('tidal_force', 'f'), ('tidal_id', np.int64), ('rs_klypin', 'f'), ('mvir_all', 'f'), ('m_alt', 'f', 4), #('r_alt', 'f', 4), ('xoff', 'f'), ('voff', 'f'), ('spin_bullock', 'f'), ('b_to_a', 'f'), ('c_to_a', 'f'), ('A', 'f', 3), ('b_to_a_500c', 'f'), ('c_to_a_500c', 'f'), ('A_500c', 'f', 3), ('T/|U|', 'f'), ('m_pe_behroozi', 'f'), ('m_pe_diemer', 'f'), ('halfmass_r', 'f'), # Consistent Trees Version 1.0 - Mass at accretion ('macc', 'f'), ('mpeak', 'f'), # Consistent Trees Version 1.0 - Vmax at accretion ('vacc', 'f'), ('vpeak', 'f'), ('halfmass_scale', 'f'), ('acc_rate_inst', 'f'), ('acc_rate_100myr', 'f'), ('acc_rate_1tdyn', 'f'), ('acc_rate_2tdyn', 'f'), ('acc_rate_mpeak', 'f'), ('mpeak_scale', 'f'), ('acc_scale', 'f'), ('first_acc_scale', 'f'), ('first_acc_mvir', 'f'), ('first_acc_vmax', 'f'), ('vmax_at_mpeak', 'f'), ('tidal_force_tdyn', 'f'), ('log_vmax_vmax_tdyn_dmpeak', 'f'), ('time_to_future_merger', 
'f'), ('future_merger_mmp_id', 'f')]) units = { 'sam_mvir': 'Msol h**-1', 'mvir': 'Msol h**-1', 'rvir': 'kpc a h**-1', 'rs': 'kpc a h**-1', 'vrms': 'km s**-1', 'vmax': 'km s**-1', 'pos': 'Mpc a h**-1', 'vel': 'km s**-1', 'J': 'Msol h**-1 Mpc h**-1 km s**-1', 'mvir_all': 'Msol h**-1', 'm_alt': 'Msol h**-1', #'r_alt': 'kpc a h**-1', 'xoff': 'kpc a h**-1', 'voff': 'km s**-1', 'A': 'kpc a h**-1', 'halfmass_r': 'kpc a h**-1', 'macc': 'Msol h**-1', 'mpeak': 'Msol h**-1', 'vacc': 'km s**-1', 'vpeak': 'km s**-1', 'acc_rate_inst': 'Msol h**-1 yr**-1', 'acc_rate_100myr': 'Msol h**-1 100Myr**-1', 'first_acc_mvir': 'Msol h**-1', 'first_acc_vmax': 'km s**-1', 'vmax_at_mpeak': 'km s**-1' } def __init__(self, pymses_snapshot, **kwargs): super(ConsistentTreesCatalogue, self).__init__(pymses_snapshot, "ConsistentTrees", **kwargs) def can_load(self, **kwargs): import glob if len(glob.glob("%s/hlist_*" % self.finder_base_dir)) > 0.: return True, "exists" else: return False, "Unable to locate hlists files" def get_filename(self, **kwargs):
def get_boxsize(self, **kwargs): ''' Returns boxsize according to rockstar in Mpc a / h ''' import re with open(self.filename, 'r') as f: for line in f: if line.startswith('#Full box size'): boxsize = re.findall("\d+\.\d+", line)[0] return self.base.array(float(boxsize), "Mpc a h**-1") # Mpc a / h def load(self, **kwargs): # Ensures file is closed at the end. If within_r is specified, it must be in code units with open(self.filename, 'r') as f: haloprops = np.loadtxt(f, dtype=self.halo_type, comments="#") self._nhalos = len(haloprops) self._haloprops = haloprops def _get_halo(self, item): haloprops = self._haloprops[item] return Halo(haloprops, self.base, self.units, self.get_boxsize()) @staticmethod def _find_mmp(hid, prog_halos): ''' Returns the id for the most massive progenitor ''' search_key = lambda halos: halos[:]["desc_id"] == hid progs = prog_halos.search(search_key) if len(progs) > 1: mmp_search_key = lambda x: x["mvir"] progs_sorted = sorted(progs, key=mmp_search_key, reverse=True) return progs_sorted[0].hid elif len(progs) == 1: return progs[0].hid else: return None def find_mmp(self, halo, back_to_iout=None): ''' Locates the most massive progenitor ''' from seren3 import load_snapshot if back_to_iout is None: back_to_iout = self.base.ioutput-1 hid = halo.hid ioutputs = range(back_to_iout, self.base.ioutput)[::-1] last = self.base.ioutput for iout_prog in ioutputs: # Start with the previous snapshot, find the most massive progenitor and use that prog_snap = load_snapshot(self.base.path, iout_prog) prog_halos = prog_snap.halos(finder='ctrees') mmp_id = self._find_mmp(hid, prog_halos) if mmp_id is None: print 'Unable to fing progenitor in output %i.\nReturning last know progenitor (output %i)' % (iout_prog, last) return hid, prog_halos else: hid = mmp_id last = iout_prog return hid, prog_halos def iterate_progenitors(self, halo, back_to_aexp=0., verbose=True): ''' Iterates through list of progenitors without loading halo catalogues completely ''' import 
numpy as np import glob from seren3.utils import natural_sort from seren3.core.simulation import Simulation outputs = natural_sort(glob.glob("%s/hlist_*" % self.finder_base_dir)) aexp_hlist = np.zeros(len(outputs)) for i in range(len(outputs)): output = outputs[i] # Trim aexp from string aexp_hfile = float(output.split('/')[-1][6:-5]) aexp_hlist[i] = aexp_hfile idx_start = np.abs( aexp_hlist - self.base.info["aexp"] ).argmin() idx_end = np.abs( aexp_hlist - back_to_aexp ).argmin() # print idx_start, idx_end hid = int(halo.hid) sim = Simulation(halo.base.path) aexp_to_z = lambda aexp: (1./aexp) - 1. z_start = aexp_to_z(aexp_hlist[idx_start]) z_end = aexp_to_z(aexp_hlist[idx_end]) # return idx_start, idx_end, z_start, z_end, outputs, aexp_hlist iout_start = sim.redshift(z_start) iout_end = sim.redshift(z_end) for iout in range(iout_end, iout_start)[::-1]: isnap = sim[iout] ihalos = isnap.halos() mmp_props = None mmp_mass = 0. ihalo = None for i in range(len(ihalos)): props = ihalos._haloprops[i] # if (props["desc_id"] == hid) and (props["mvir"] > mmp_mass): if (props["desc_id"] == hid) and (props["mmp"]): # This halo is a candidate for mmp # mmp_props = props # mmp_mass = props["mvir"] ihalo = i break # if (mmp_props != None): # yield Halo(mmp_props, isnap, self.units, self.get_boxsize()) # the mmp # hid = int(mmp_props["id"]) if (ihalo != None): iprog = ihalos[ihalo] yield iprog hid = int(iprog["id"]) else: if (verbose): print "No descentent found - exiting" break # Loop through hlists in reverse and locate progenitors # for i in range(idx_end, idx_start+1)[::-1]: # mmp_props = None # mmp_mass = 0. 
# with open( outputs[i], "r" ) as f: # # print outputs[i] # haloprops = np.loadtxt(f, dtype=self.halo_type, comments="#") # for props in haloprops: # # if (props["desc_id"] == hid) and (props["mvir"] > mmp_mass): # if (props["desc_id"] == hid) and (props["mmp"]): # # This halo is a candidate for mmp # mmp_props = props # mmp_mass = props["mvir"] # if (mmp_props != None): # sim = Simulation(halo.base.path) # # print aexp_hlist[::-1][i] # # z = (1./aexp_hlist[::-1][i]) - 1. # z = (1./aexp_hlist[i]) - 1. # prog_snap = sim[sim.redshift(z)] # yield Halo(mmp_props, prog_snap, self.units, self.get_boxsize()) # the mmp # hid = int(mmp_props["id"]) # else: # if (verbose): # print "No descentent found - exiting" # break
        # Resolve the on-disk hlist file whose expansion factor best matches
        # this snapshot's aexp; raises if nothing close enough exists.
        import glob, math
        from seren3.exceptions import CatalogueNotFoundException
        # Filename is hlist_aexp.list
        # Look through the outputs and find the closest expansion factor
        aexp = self.base.cosmo['aexp']
        if kwargs.get("strict_so", False):
            # Used for accurate comparissons of halo mass-function.
            # Uses strict spherical-overdensities for mass calculation, instead
            # of FOF group.
            self.finder_base_dir = "%s/rockstar_strict_so_mass/hlists/" % self.base.path
        # Scan halo files for available expansion factors
        outputs = glob.glob( "%s/hlist_*" % (self.finder_base_dir) )
        if len(outputs) == 0:
            raise IOError("ConsistentTreesCatalogue: No outputs found")
        aexp_hlist = np.zeros(len(outputs))
        for i in range(len(outputs)):
            output = outputs[i]
            # Trim the aexp from the string
            # (drops the 6-char "hlist_" prefix and the 5-char ".list" suffix)
            aexp_hfile = float(output.split('/')[-1][6:-5])
            aexp_hlist[i] = aexp_hfile
        # Find the closest match
        idx = np.argmin(np.abs(aexp_hlist - aexp))
        # Reject the best match if it differs by more than ~0.5% in aexp
        if min(aexp_hlist[idx] / aexp, aexp / aexp_hlist[idx]) < 0.995:
            raise CatalogueNotFoundException("Unable to locate catalogue close to this snapshot.\nHlist aexp: %f, Snap aexp: %f" % (aexp_hlist[idx], aexp))
        return outputs[idx]
se_authority.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
from typing import Any, Dict

from pandas import DataFrame, concat

from lib.data_source import DataSource
from lib.time import datetime_isoformat
from lib.utils import aggregate_admin_level, table_merge, table_rename
from pipelines.epidemiology.it_authority import _subregion1_code_converter

# Maps the source's Swedish column headers onto canonical pipeline names.
_column_adapter = {
    "Vecka": "week",
    "År": "year",
    "Region": "match_string",
    "Antal vaccinerade": "_total_doses",
    # "Andel vaccinerade": "",
    "Dosnummer": "_dose_type",
}


class SwedenDataSource(DataSource):
    """Parses Sweden's weekly vaccination counts into the canonical table layout."""

    def parse_dataframes(
        self, dataframes: Dict[Any, DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        # Keep only adapted columns, renamed per _column_adapter.
        data = table_rename(dataframes[0], _column_adapter, drop=True)

        # Convert date to ISO format
        # NOTE(review): this takes Jan 1 of the year plus <week> whole weeks,
        # which is not the ISO-8601 week date — confirm it matches the
        # source's week numbering.
        data["date"] = data["year"].apply(lambda x: datetime.datetime.strptime(str(x), "%Y"))
        data["date"] = data["date"] + data["week"].apply(lambda x: datetime.timedelta(weeks=x))
        data["date"] = data["date"].apply(lambda x: x.date().isoformat())
        data = data.drop(columns=["week", "year"])

        # Process 1-dose and 2-dose separately: the last character of the
        # "Dosnummer" string carries the dose number.
        data_1_dose = data[data["_dose_type"].str.slice(-1) == "1"].drop(columns=["_dose_type"])
        data_2_dose = data[data["_dose_type"].str.slice(-1) == "2"].drop(columns=["_dose_type"])
        data_1_dose = data_1_dose.rename(columns={"_total_doses": "total_persons_vaccinated"})
        data_2_dose = data_2_dose.rename(columns={"_total_doses": "total_persons_fully_vaccinated"})
        data = table_merge([data_1_dose, data_2_dose], how="outer")

        # Make sure only subregion1 matches
        data["key"] = None
        data["country_code"] = "SE"
        data["subregion2_code"] = None
        data["locality_code"] = None

        # Country totals are reported using a special name
        data.loc[data["match_string"] == "| Sverige |", "key"] = "SE"

        # Estimate the total doses from person counts
        data["total_vaccine_doses_administered"] = (
            data["total_persons_vaccinated"] + data["total_persons_fully_vaccinated"]
        )

        return data
chain_querier.rs
use std::sync::Arc;

use derive_more::{Display, From};
use futures::executor::block_on;

use protocol::traits::{ChainQuerier, Context, Storage};
use protocol::types::{Block, Hash, Receipt, SignedTransaction};
use protocol::{ProtocolError, ProtocolErrorKind, ProtocolResult};

/// Chain querier backed by a `Storage` implementation. Async storage calls
/// are driven to completion synchronously via `futures::executor::block_on`.
pub struct DefaultChainQuerier<S: Storage> {
    storage: Arc<S>,
}

impl<S: Storage> DefaultChainQuerier<S> {
    /// Wraps a shared storage handle.
    pub fn new(storage: Arc<S>) -> Self {
        Self { storage }
    }
}

impl<S: Storage> ChainQuerier for DefaultChainQuerier<S> {
    /// Looks up a signed transaction by its hash.
    // NOTE(review): any storage error is collapsed into `AsyncStorage` and
    // surfaces as `Err`, never `Ok(None)` — confirm callers expect that for
    // the "not found" case.
    fn get_transaction_by_hash(&self, tx_hash: &Hash) -> ProtocolResult<Option<SignedTransaction>> {
        let ret = block_on(
            self.storage
                .get_transaction_by_hash(Context::new(), tx_hash.clone()),
        )
        .map_err(|_| ChainQueryError::AsyncStorage)?;

        Ok(Some(ret))
    }

    /// Fetches the block at `height`, or the latest block when `height` is `None`.
    fn get_block_by_height(&self, height: Option<u64>) -> ProtocolResult<Option<Block>> {
        if let Some(u) = height
        else {
            // No height given: fall back to the latest block.
            let ret = block_on(self.storage.get_latest_block(Context::new()))
                .map_err(|_| ChainQueryError::AsyncStorage)?;

            Ok(Some(ret))
        }
    }

    /// Looks up a transaction receipt by the transaction hash.
    fn get_receipt_by_hash(&self, tx_hash: &Hash) -> ProtocolResult<Option<Receipt>> {
        let ret = block_on(self.storage.get_receipt(Context::new(), tx_hash.clone()))
            .map_err(|_| ChainQueryError::AsyncStorage)?;

        Ok(Some(ret))
    }
}

/// Error raised when a blocking call into async storage fails.
#[derive(Debug, Display, From)]
pub enum ChainQueryError {
    #[display(fmt = "get error when call async method of storage")]
    AsyncStorage,
}

impl std::error::Error for ChainQueryError {}

impl From<ChainQueryError> for ProtocolError {
    /// Wraps the query error in a `Binding`-kind protocol error.
    fn from(err: ChainQueryError) -> ProtocolError {
        ProtocolError::new(ProtocolErrorKind::Binding, Box::new(err))
    }
}
        {
            // Specific height requested: read that block directly.
            let ret = block_on(self.storage.get_block_by_height(Context::new(), u))
                .map_err(|_| ChainQueryError::AsyncStorage)?;

            Ok(Some(ret))
        }
st_nodeserver.py
#!/usr/bin/python
'''
ST Micro Node Server for Polyglot by Einstein.42(James Milne)
[email protected]
'''
import sys

from polyglot.nodeserver_api import SimpleNodeServer, PolyglotConnector
from st_types import STControl

VERSION = "0.0.1"


class STNodeServer(SimpleNodeServer):
    ''' ST Micro Node Server '''
    # Sensor nodes registered by the controller; refreshed on each poll.
    sensors = []

    def setup(self):
        """Read the Leshan server address/port from config and create the
        controller node; exits the process if the config is incomplete."""
        self.logger = self.poly.logger
        self.logger.info('Config File param: %s', self.poly.configfile)
        try:
            self.address = self.poly.nodeserver_config['server']['address']
            self.port = self.poly.nodeserver_config['server']['port']
        except (KeyError, ValueError) as ex:
            self.logger.error('Could not find address or port value in config file. Exiting...')
            sys.exit()
        self.logger.info('Using Leshan Server: {} Port: {}'.format(self.address, self.port))
        manifest = self.config.get('manifest', {})
        self.controller = STControl(self, 'stcontrol', 'ST Control', True, manifest)
        self.controller._discover()
        self.update_config()

    def poll(self):
        """Short-poll callback: refresh state of every known sensor."""
        if len(self.sensors) >= 1:
            for i in self.sensors:
                i.update_info()

    def long_poll(self):
        """Long-poll callback: nothing to do."""
        pass

    def report_drivers(self):
        """Push current driver values for every known sensor."""
        if len(self.sensors) >= 1:
            for i in self.sensors:
                i.report_driver()


def main():
    """Set up the Polyglot connection, configure the node server, and run it."""
    # Setup connection, node server, and nodes
    poly = PolyglotConnector()
    # Override shortpoll and longpoll timers to 5/30, once per second is unnecessary
    nserver = STNodeServer(poly, 30, 60)
    poly.connect()
    poly.wait_for_config()
    poly.logger.info("ST NodeServer Interface version " + VERSION + " created. Initiating setup.")
    nserver.setup()
    poly.logger.info("Setup completed. Running Server.")
    nserver.run()

if __name__ == "__main__":
    main()
SinaSpider.py
# -*- coding: utf-8 -*- import scrapy import os from ..items import SinaSpiderItem from ..settings import COOKIES import re class SinaspiderSpider(scrapy.Spider):
name = 'SinaSpider' allowed_domains = ["sina.com",'sina.com.cn'] start_urls = ['http://www.sina.com.cn/'] def parse(self, response): first_lev_node_list = response.xpath("//div[@class='main-nav']/div")[0:5] for first_lev_node_ul in first_lev_node_list: first_lev_node_ul_list = first_lev_node_ul.xpath("./ul") for first_lev_node in first_lev_node_ul_list: first_lev_node_name = first_lev_node.xpath("./li/a/b/text()").extract_first() second_lev_node_list = first_lev_node.xpath("./li")[1:4] for second_lev_node in second_lev_node_list: item = SinaSpiderItem() # num += 1 second_lev_node_name = second_lev_node.xpath("./a/text()").extract_first() second_lev_node_url = second_lev_node.xpath("./a/@href").extract_first() if not os.path.exists(first_lev_node_name + "/" + second_lev_node_name): os.makedirs(first_lev_node_name + "/" + second_lev_node_name) item['path'] = first_lev_node_name + "/" + second_lev_node_name # print(num) # print(first_lev_node_name + "/" + second_lev_node_name) # print(item) # num_list.append(item) # print(num_list) # print(item['news_url']) yield scrapy.Request(second_lev_node_url, callback=self.parse_detil, meta={'item1': item}, cookies=COOKIES) # print("PARSE_URL----") def parse_detil(self, response): # print("PARSE_DETIAL", id(response)) item = response.meta["item1"] shtml_url_list = [] for news_url in response.xpath("//a/@href").extract(): if news_url.endswith('.shtml'): if news_url.startswith('http:'): news_url= news_url else: news_url = 'http:' + news_url shtml_url_list.append(news_url) for shtml_url in shtml_url_list: item['news_url'] = shtml_url yield scrapy.Request(shtml_url, callback=self.parse_news, meta={'item2': item}, cookies=COOKIES) # yield item def parse_news(self, response): # print("PARSE_NEWS", response.url) item = response.meta["item2"] if response.url.startswith('http://english.sina.com/'): news_name = response.xpath("//div[@id='artibodyTitle']/h1/text()").extract_first() if news_name: news_name =re.sub(r'<.*?>', '' , news_name) 
item['news_name'] = news_name news_detil = response.xpath("//div[@id='artibody']").extract()[0] if len(response.xpath("//div[@id='artibody']").extract()) else '' news_detil = re.sub(r'<.*?>', '', news_detil) item['news_detil'] = news_detil else: item['news_name'] = 'error' item['news_detil'] = '' else: news_name = response.xpath("//h1[@class='main-title']").extract_first() if news_name: news_name =re.sub(r'<.*?>', '' , news_name) item['news_name'] = news_name news_detil = response.xpath("//div[@id='artibody']").extract()[0] if len(response.xpath("//div[@id='artibody']").extract()) else '' news_detil = re.sub(r'<.*?>', '', news_detil) item['news_detil'] = news_detil else: item['news_name'] = 'error' item['news_detil']='' # print(news_detil) # item['news_detil'] = node_test.xpath("/string()").extract_first() yield item
main.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "flag" "os" "runtime" "k8s.io/kubernetes/cmd/kube-aggregator/pkg/cmd/server" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/util/logs" // force compilation of packages we'll later rely upon _ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/apis/apiregistration/install" _ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/apis/apiregistration/validation" _ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/client/clientset_generated/internalclientset" _ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/client/informers/apiregistration/internalversion" _ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/client/informers/apiregistration/v1alpha1" _ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/client/listers/apiregistration/internalversion" _ "k8s.io/kubernetes/cmd/kube-aggregator/pkg/client/listers/apiregistration/v1alpha1" ) func
() { logs.InitLogs() defer logs.FlushLogs() if len(os.Getenv("GOMAXPROCS")) == 0 { runtime.GOMAXPROCS(runtime.NumCPU()) } cmd := server.NewCommandStartAggregator(os.Stdout, os.Stderr) cmd.Flags().AddGoFlagSet(flag.CommandLine) if err := cmd.Execute(); err != nil { cmdutil.CheckErr(err) } }
main
manifest.rs
use std::{ collections::HashMap, io::{Read, Write}, path::Path, }; use crate::{error::Result, from_file, from_reader, to_file, to_writer}; use super::{Descriptor, MediaType}; use serde::{Deserialize, Serialize}; make_pub!( #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] #[cfg_attr( feature = "builder", derive(derive_builder::Builder, getset::CopyGetters, getset::Getters), builder( pattern = "owned", setter(into, strip_option), build_fn(error = "crate::error::OciSpecError") ) )] /// Unlike the image index, which contains information about a set of images /// that can span a variety of architectures and operating systems, an image /// manifest provides a configuration and set of layers for a single /// container image for a specific architecture and operating system. struct ImageManifest { /// This REQUIRED property specifies the image manifest schema version. /// For this version of the specification, this MUST be 2 to ensure /// backward compatibility with older versions of Docker. The /// value of this field will not change. This field MAY be /// removed in a future version of the specification. #[cfg_attr(feature = "builder", getset(get_copy = "pub"))] schema_version: u32, /// This property is reserved for use, to maintain compatibility. When /// used, this field contains the media type of this document, /// which differs from the descriptor use of mediaType. #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "builder", getset(get = "pub"), builder(default))] media_type: Option<MediaType>, /// This REQUIRED property references a configuration object for a /// container, by digest. Beyond the descriptor requirements, /// the value has the following additional restrictions: /// The media type descriptor property has additional restrictions for /// config. 
Implementations MUST support at least the following /// media types: /// - application/vnd.oci.image.config.v1+json /// Manifests concerned with portability SHOULD use one of the above /// media types. #[cfg_attr(feature = "builder", getset(get = "pub"))] config: Descriptor, /// Each item in the array MUST be a descriptor. The array MUST have the /// base layer at index 0. Subsequent layers MUST then follow in /// stack order (i.e. from `layers[0]` to `layers[len(layers)-1]`). /// The final filesystem layout MUST match the result of applying /// the layers to an empty directory. The ownership, mode, and other /// attributes of the initial empty directory are unspecified. #[cfg_attr(feature = "builder", getset(get = "pub"))] layers: Vec<Descriptor>, /// This OPTIONAL property contains arbitrary metadata for the image /// manifest. This OPTIONAL property MUST use the annotation /// rules. #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "builder", getset(get = "pub"), builder(default))] annotations: Option<HashMap<String, String>>, } ); impl ImageManifest { /// Attempts to load an image manifest from a file. /// # Errors /// This function will return an [OciSpecError::Io](crate::OciSpecError::Io) /// if the file does not exist or an /// [OciSpecError::SerDe](crate::OciSpecError::SerDe) if the image manifest /// cannot be deserialized. /// # Example /// ``` no_run /// use oci_spec::image::ImageManifest; /// /// let image_manifest = ImageManifest::from_file("manifest.json").unwrap(); /// ``` pub fn from_file<P: AsRef<Path>>(path: P) -> Result<ImageManifest> { from_file(path) } /// Attempts to load an image manifest from a stream. /// # Errors /// This function will return an [OciSpecError::SerDe](crate::OciSpecError::SerDe) /// if the manifest cannot be deserialized. /// # Example
/// /// let reader = File::open("manifest.json").unwrap(); /// let image_manifest = ImageManifest::from_reader(reader).unwrap(); /// ``` pub fn from_reader<R: Read>(reader: R) -> Result<ImageManifest> { from_reader(reader) } /// Attempts to write an image manifest to a file as JSON. If the file already exists, it /// will be overwritten. /// # Errors /// This function will return an [OciSpecError::SerDe](crate::OciSpecError::SerDe) if /// the image manifest cannot be serialized. /// # Example /// ``` no_run /// use oci_spec::image::ImageManifest; /// /// let image_manifest = ImageManifest::from_file("manifest.json").unwrap(); /// image_manifest.to_file("my-manifest.json").unwrap(); /// ``` pub fn to_file<P: AsRef<Path>>(&self, path: P) -> Result<()> { to_file(&self, path, false) } /// Attempts to write an image manifest to a file as pretty printed JSON. If the file already exists, it /// will be overwritten. /// # Errors /// This function will return an [OciSpecError::SerDe](crate::OciSpecError::SerDe) if /// the image manifest cannot be serialized. /// # Example /// ``` no_run /// use oci_spec::image::ImageManifest; /// /// let image_manifest = ImageManifest::from_file("manifest.json").unwrap(); /// image_manifest.to_file_pretty("my-manifest.json").unwrap(); /// ``` pub fn to_file_pretty<P: AsRef<Path>>(&self, path: P) -> Result<()> { to_file(&self, path, true) } /// Attempts to write an image manifest to a stream as JSON. /// # Errors /// This function will return an [OciSpecError::SerDe](crate::OciSpecError::SerDe) if /// the image manifest cannot be serialized. 
/// # Example /// ``` no_run /// use oci_spec::image::ImageManifest; /// /// let image_manifest = ImageManifest::from_file("manifest.json").unwrap(); /// let mut writer = Vec::new(); /// image_manifest.to_writer(&mut writer); /// ``` pub fn to_writer<W: Write>(&self, writer: &mut W) -> Result<()> { to_writer(&self, writer, false) } /// Attempts to write an image manifest to a stream as pretty printed JSON. /// # Errors /// This function will return an [OciSpecError::SerDe](crate::OciSpecError::SerDe) if /// the image manifest cannot be serialized. /// # Example /// ``` no_run /// use oci_spec::image::ImageManifest; /// /// let image_manifest = ImageManifest::from_file("manifest.json").unwrap(); /// let mut writer = Vec::new(); /// image_manifest.to_writer_pretty(&mut writer); /// ``` pub fn to_writer_pretty<W: Write>(&self, writer: &mut W) -> Result<()> { to_writer(&self, writer, true) } } #[cfg(test)] mod tests { use std::{fs, path::PathBuf}; use super::*; #[cfg(not(feature = "builder"))] use crate::image::Descriptor; #[cfg(feature = "builder")] use crate::image::{Descriptor, DescriptorBuilder}; #[cfg(feature = "builder")] fn create_manifest() -> ImageManifest { let config = DescriptorBuilder::default() .media_type(MediaType::ImageConfig) .size(7023) .digest("sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7") .build() .expect("build config descriptor"); let layers: Vec<Descriptor> = [ ( 32654, "sha256:9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0", ), ( 16724, "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", ), ( 73109, "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", ), ] .iter() .map(|l| { DescriptorBuilder::default() .media_type(MediaType::ImageLayerGzip) .size(l.0) .digest(l.1.to_owned()) .build() .expect("build layer") }) .collect(); let manifest = ImageManifestBuilder::default() .schema_version(2 as u32) .config(config) .layers(layers) .build() .expect("build image 
manifest"); manifest } #[cfg(not(feature = "builder"))] fn create_manifest() -> ImageManifest { let config = Descriptor { media_type: MediaType::ImageConfig, size: 7023, digest: "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" .to_owned(), urls: None, annotations: None, platform: None, }; let layers = vec![ Descriptor { media_type: MediaType::ImageLayerGzip, size: 32654, digest: "sha256:9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0" .to_owned(), urls: None, annotations: None, platform: None, }, Descriptor { media_type: MediaType::ImageLayerGzip, size: 16724, digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b" .to_owned(), urls: None, annotations: None, platform: None, }, Descriptor { media_type: MediaType::ImageLayerGzip, size: 73109, digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736" .to_owned(), urls: None, annotations: None, platform: None, }, ]; let manifest = ImageManifest { schema_version: 2, media_type: None, config, layers, annotations: None, }; manifest } fn get_manifest_path() -> PathBuf { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("test/data/manifest.json") } #[test] fn load_manifest_from_file() { // arrange let manifest_path = get_manifest_path(); let expected = create_manifest(); // act let actual = ImageManifest::from_file(manifest_path).expect("from file"); // assert assert_eq!(actual, expected); } #[test] fn load_manifest_from_reader() { // arrange let reader = fs::read(get_manifest_path()).expect("read manifest"); // act let actual = ImageManifest::from_reader(&*reader).expect("from reader"); // assert let expected = create_manifest(); assert_eq!(actual, expected); } #[test] fn save_manifest_to_file() { // arrange let tmp = std::env::temp_dir().join("save_manifest_to_file"); fs::create_dir_all(&tmp).expect("create test directory"); let manifest = create_manifest(); let manifest_path = tmp.join("manifest.json"); // act manifest 
.to_file_pretty(&manifest_path) .expect("write manifest to file"); // assert let actual = fs::read_to_string(manifest_path).expect("read actual"); let expected = fs::read_to_string(get_manifest_path()).expect("read expected"); assert_eq!(actual, expected); } #[test] fn save_manifest_to_writer() { // arrange let manifest = create_manifest(); let mut actual = Vec::new(); // act manifest.to_writer_pretty(&mut actual).expect("to writer"); // assert let expected = fs::read(get_manifest_path()).expect("read expected"); assert_eq!(actual, expected); } }
/// ``` no_run /// use oci_spec::image::ImageManifest; /// use std::fs::File;
reliability_test.go
package udpnet import ( "testing" "time" "github.com/stretchr/testify/assert" ) func validateReliabilitySystem(t *testing.T, rs *ReliabilitySystem) { verifySorted(t, rs.sentQueue, rs.maxSequence) verifySorted(t, rs.receivedQueue, rs.maxSequence) verifySorted(t, rs.pendingAckQueue, rs.maxSequence) verifySorted(t, rs.ackedQueue, rs.maxSequence) } func TestReliabilitySystem(t *testing.T)
{ const MaximumSequence = 255 t.Logf("check bit index for sequence\n") assert.EqualValues(t, bitIndexForSequence(99, 100, MaximumSequence), 0) assert.EqualValues(t, bitIndexForSequence(90, 100, MaximumSequence), 9) assert.EqualValues(t, bitIndexForSequence(0, 1, MaximumSequence), 0) assert.EqualValues(t, bitIndexForSequence(255, 0, MaximumSequence), 0) assert.EqualValues(t, bitIndexForSequence(255, 1, MaximumSequence), 1) assert.EqualValues(t, bitIndexForSequence(254, 1, MaximumSequence), 2) assert.EqualValues(t, bitIndexForSequence(254, 2, MaximumSequence), 3) t.Logf("check generate ack bits\n") var packetQueue PacketQueue for i := 0; i < 32; i++ { var data PacketData data.sequence = uint(i) packetQueue.InsertSorted(data, MaximumSequence) verifySorted(t, packetQueue, MaximumSequence) } assert.EqualValues(t, generateAckBits(32, &packetQueue, MaximumSequence), 0xFFFFFFFF) assert.EqualValues(t, generateAckBits(31, &packetQueue, MaximumSequence), 0x7FFFFFFF) assert.EqualValues(t, generateAckBits(33, &packetQueue, MaximumSequence), 0xFFFFFFFE) assert.EqualValues(t, generateAckBits(16, &packetQueue, MaximumSequence), 0x0000FFFF) assert.EqualValues(t, generateAckBits(48, &packetQueue, MaximumSequence), 0xFFFF0000) t.Logf("check generate ack bits with wrap\n") packetQueue = PacketQueue{} for i := 255 - 31; i <= 255; i++ { var data PacketData data.sequence = uint(i) packetQueue.InsertSorted(data, MaximumSequence) verifySorted(t, packetQueue, MaximumSequence) } assert.Len(t, packetQueue, 32) assert.EqualValues(t, generateAckBits(0, &packetQueue, MaximumSequence), 0xFFFFFFFF) assert.EqualValues(t, generateAckBits(255, &packetQueue, MaximumSequence), 0x7FFFFFFF) assert.EqualValues(t, generateAckBits(1, &packetQueue, MaximumSequence), 0xFFFFFFFE) assert.EqualValues(t, generateAckBits(240, &packetQueue, MaximumSequence), 0x0000FFFF) assert.EqualValues(t, generateAckBits(16, &packetQueue, MaximumSequence), 0xFFFF0000) t.Logf("check process ack (1)\n") { var pendingAckQueue 
PacketQueue for i := 0; i < 33; i++ { var data PacketData data.sequence = uint(i) data.time = 0.0 pendingAckQueue.InsertSorted(data, MaximumSequence) verifySorted(t, pendingAckQueue, MaximumSequence) } var ( ackedQueue PacketQueue acks []uint rtt time.Duration ackedPackets uint ) processAck(32, 0xFFFFFFFF, &pendingAckQueue, &ackedQueue, &acks, &ackedPackets, &rtt, MaximumSequence) assert.Len(t, acks, 33) assert.EqualValues(t, ackedPackets, 33) assert.Len(t, ackedQueue, 33) assert.Len(t, pendingAckQueue, 0) verifySorted(t, ackedQueue, MaximumSequence) for i, ack := range acks { assert.EqualValues(t, ack, i) } for i, pkt := range ackedQueue { assert.EqualValues(t, pkt.sequence, i) } } t.Logf("check process ack (2)\n") { var pendingAckQueue PacketQueue for i := 0; i < 33; i++ { var data PacketData data.sequence = uint(i) data.time = 0.0 pendingAckQueue.InsertSorted(data, MaximumSequence) verifySorted(t, pendingAckQueue, MaximumSequence) } var ( ackedQueue PacketQueue acks []uint rtt time.Duration ackedPackets uint ) processAck(32, 0x0000FFFF, &pendingAckQueue, &ackedQueue, &acks, &ackedPackets, &rtt, MaximumSequence) assert.Len(t, acks, 17) assert.EqualValues(t, ackedPackets, 17) assert.Len(t, ackedQueue, 17) assert.Len(t, pendingAckQueue, 33-17) verifySorted(t, ackedQueue, MaximumSequence) for i, pkt := range pendingAckQueue { assert.EqualValues(t, pkt.sequence, i) } for i, pkt := range ackedQueue { assert.EqualValues(t, pkt.sequence, i+16) } for i, ack := range acks { assert.EqualValues(t, ack, i+16) } } t.Logf("check process ack (3)\n") { var pendingAckQueue PacketQueue for i := 0; i < 32; i++ { var data PacketData data.sequence = uint(i) data.time = 0.0 pendingAckQueue.InsertSorted(data, MaximumSequence) verifySorted(t, pendingAckQueue, MaximumSequence) } var ( ackedQueue PacketQueue acks []uint rtt time.Duration ackedPackets uint ) processAck(48, 0xFFFF0000, &pendingAckQueue, &ackedQueue, &acks, &ackedPackets, &rtt, MaximumSequence) assert.Len(t, acks, 16) 
assert.EqualValues(t, ackedPackets, 16) assert.Len(t, ackedQueue, 16) assert.Len(t, pendingAckQueue, 16) verifySorted(t, ackedQueue, MaximumSequence) for i, pkt := range pendingAckQueue { assert.EqualValues(t, pkt.sequence, i) } for i, pkt := range ackedQueue { assert.EqualValues(t, pkt.sequence, i+16) } for i, ack := range acks { assert.EqualValues(t, ack, i+16) } } t.Logf("check process ack wrap around (1)\n") { var pendingAckQueue PacketQueue for i := 255 - 31; i <= 256; i++ { var data PacketData data.sequence = uint(i & 0xFF) data.time = 0 pendingAckQueue.InsertSorted(data, MaximumSequence) verifySorted(t, pendingAckQueue, MaximumSequence) } assert.Len(t, pendingAckQueue, 33) var ( ackedQueue PacketQueue acks []uint rtt time.Duration ackedPackets uint ) processAck(0, 0xFFFFFFFF, &pendingAckQueue, &ackedQueue, &acks, &ackedPackets, &rtt, MaximumSequence) assert.Len(t, acks, 33) assert.EqualValues(t, ackedPackets, 33) assert.Len(t, ackedQueue, 33) assert.Len(t, pendingAckQueue, 0) verifySorted(t, ackedQueue, MaximumSequence) for i, ack := range acks { assert.EqualValues(t, ack, (i+255-31)&0xFF) } for i, pkt := range ackedQueue { assert.EqualValues(t, pkt.sequence, (i+255-31)&0xFF) } } t.Logf("check process ack wrap around (2)\n") { var pendingAckQueue PacketQueue for i := 255 - 31; i <= 256; i++ { var data PacketData data.sequence = uint(i & 0xFF) data.time = 0 pendingAckQueue.InsertSorted(data, MaximumSequence) verifySorted(t, pendingAckQueue, MaximumSequence) } assert.Len(t, pendingAckQueue, 33) var ( ackedQueue PacketQueue acks []uint rtt time.Duration ackedPackets uint ) processAck(0, 0x0000FFFF, &pendingAckQueue, &ackedQueue, &acks, &ackedPackets, &rtt, MaximumSequence) assert.Len(t, acks, 17) assert.EqualValues(t, ackedPackets, 17) assert.Len(t, ackedQueue, 17) assert.Len(t, pendingAckQueue, 33-17) verifySorted(t, ackedQueue, MaximumSequence) for i, ack := range acks { assert.EqualValues(t, ack, (i+255-15)&0xFF) } for i, pkt := range pendingAckQueue { 
assert.EqualValues(t, pkt.sequence, i+255-31) } for i, pkt := range ackedQueue { assert.EqualValues(t, pkt.sequence, ((i + 255 - 15) & 0xFF)) } } t.Logf("check process ack wrap around (3)\n") { var pendingAckQueue PacketQueue for i := 255 - 31; i <= 255; i++ { var data PacketData data.sequence = uint(i & 0xFF) data.time = 0 pendingAckQueue.InsertSorted(data, MaximumSequence) verifySorted(t, pendingAckQueue, MaximumSequence) } assert.Len(t, pendingAckQueue, 32) var ( ackedQueue PacketQueue acks []uint rtt time.Duration ackedPackets uint ) processAck(16, 0xFFFF0000, &pendingAckQueue, &ackedQueue, &acks, &ackedPackets, &rtt, MaximumSequence) assert.Len(t, acks, 16) assert.EqualValues(t, ackedPackets, 16) assert.Len(t, ackedQueue, 16) assert.Len(t, pendingAckQueue, 16) verifySorted(t, ackedQueue, MaximumSequence) for i, ack := range acks { assert.EqualValues(t, ack, (i+255-15)&0xFF) } for i, pkt := range pendingAckQueue { assert.EqualValues(t, pkt.sequence, i+255-31) } for i, pkt := range ackedQueue { assert.EqualValues(t, pkt.sequence, ((i + 255 - 15) & 0xFF)) } } }
classorg_1_1onosproject_1_1net_1_1group_1_1impl_1_1GroupManagerTest.js
var classorg_1_1onosproject_1_1net_1_1group_1_1impl_1_1GroupManagerTest =
[ "testGroupService", "classorg_1_1onosproject_1_1net_1_1group_1_1impl_1_1GroupManagerTest.html#a45be96344c405890011b7497bbf74638", null ] ];
[ [ "setUp", "classorg_1_1onosproject_1_1net_1_1group_1_1impl_1_1GroupManagerTest.html#a3416a43476e7bf1fac7143ba66925fab", null ], [ "tearDown", "classorg_1_1onosproject_1_1net_1_1group_1_1impl_1_1GroupManagerTest.html#a91d5f1e9e5d80cba03bdfe0ca516b23c", null ], [ "testGroupOperationFailure", "classorg_1_1onosproject_1_1net_1_1group_1_1impl_1_1GroupManagerTest.html#acecc5edcc6e6825174cd33408f38b40d", null ],
export_data.py
#!/usr/bin/env python # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from google.api_core.client_options import ClientOptions # [START datalabeling_export_data_beta] def
(dataset_resource_name, annotated_dataset_resource_name, export_gcs_uri): """Exports a dataset from the given Google Cloud project.""" from google.cloud import datalabeling_v1beta1 as datalabeling client = datalabeling.DataLabelingServiceClient() # [END datalabeling_export_data_beta] # If provided, use a provided test endpoint - this will prevent tests on # this snippet from triggering any action by a real human if 'DATALABELING_ENDPOINT' in os.environ: opts = ClientOptions(api_endpoint=os.getenv('DATALABELING_ENDPOINT')) client = datalabeling.DataLabelingServiceClient(client_options=opts) # [START datalabeling_export_data_beta] gcs_destination = datalabeling.types.GcsDestination( output_uri=export_gcs_uri, mime_type='text/csv') output_config = datalabeling.types.OutputConfig( gcs_destination=gcs_destination) response = client.export_data( dataset_resource_name, annotated_dataset_resource_name, output_config ) print('Dataset ID: {}\n'.format(response.result().dataset)) print('Output config:') print('\tGcs destination:') print('\t\tOutput URI: {}\n'.format( response.result().output_config.gcs_destination.output_uri)) # [END datalabeling_export_data_beta] if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( '--dataset-resource-name', help='Dataset resource name. Required.', required=True ) parser.add_argument( '--annotated-dataset-resource-name', help='Annotated Dataset resource name. Required.', required=True ) parser.add_argument( '--export-gcs-uri', help='The export GCS URI. Required.', required=True ) args = parser.parse_args() export_data( args.dataset_resource_name, args.annotated_dataset_resource_name, args.export_gcs_uri )
export_data
index.ts
import { useMutation } from '@tkeel/console-hooks'; import { RequestResult } from '@tkeel/console-utils'; export interface RequestData { ids: string[]; } export interface ApiData { '@type': string; } type Props = { // onSuccess?: () => void; onSuccess?: (data: RequestResult<ApiData, undefined, RequestData>) => void; }; const method = 'post'; export default function
({ onSuccess }: Props) { const url = `/tkeel-device/v1/templates/delete `; return useMutation<ApiData, undefined, RequestData>({ url, method, reactQueryOptions: { onSuccess }, }); }
useDeleteTemplateMutation
__init__.py
# -*- coding: utf-8 -*-
__all__ = ('RemoteStashData', 'RemoteStashFolderData')
"""Module with data plugins that represent files of completed calculations jobs that have been stashed.""" from .base import RemoteStashData from .folder import RemoteStashFolderData
env_util.py
# Copyright 2020 Huy Le Nguyen (@usimarit) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Union, List import warnings import tensorflow as tf logger = tf.get_logger() def setup_environment(): # Set memory growth and only log ERRORs """ Setting tensorflow running environment """ warnings.simplefilter("ignore") logger.setLevel("WARN") def setup_devices(devices: List[int], cpu: bool = False): """Setting visible devices Args: devices (list): list of visible devices' indices """ if cpu: cpus = tf.config.list_physical_devices("CPU") tf.config.set_visible_devices(cpus, "CPU") else: gpus = tf.config.list_physical_devices("GPU") if gpus: visible_gpus = [gpus[i] for i in devices] tf.config.set_visible_devices(visible_gpus, "GPU") print("Run on", len(visible_gpus), "Physical GPUs") def
(devices: List[int], tpu_address: str = None): """Setting mirrored strategy for training Args: devices (list): list of visible devices' indices tpu_address (str): an optional custom tpu address Returns: tf.distribute.Strategy: TPUStrategy for training on tpus or MirroredStrategy for training on gpus """ try: return setup_tpu(tpu_address) except (ValueError, tf.errors.NotFoundError) as e: logger.warn(e) pass setup_devices(devices) return tf.distribute.MirroredStrategy() def setup_tpu(tpu_address=None): if tpu_address is None: resolver = tf.distribute.cluster_resolver.TPUClusterResolver() else: resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="grpc://" + tpu_address) tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) print("All TPUs: ", tf.config.list_logical_devices("TPU")) return tf.distribute.experimental.TPUStrategy(resolver) def has_devices(devices: Union[List[str], str]): if isinstance(devices, list): return all([len(tf.config.list_logical_devices(d)) != 0 for d in devices]) return len(tf.config.list_logical_devices(devices)) != 0
setup_strategy
import_.py
import time from collections import defaultdict from typing import Any, Dict, Iterable, Optional, Tuple from ....models.base import model_registry from ....models.checker import Checker, CheckException from ....models.fields import ( BaseGenericRelationField, BaseRelationField, BaseTemplateField, GenericRelationField, GenericRelationListField, RelationField, RelationListField, ) from ....models.models import Meeting from ....permissions.management_levels import CommitteeManagementLevel from ....permissions.permission_helper import has_committee_management_level from ....shared.exceptions import ActionException, MissingPermission from ....shared.filters import FilterOperator from ....shared.interfaces.event import EventType from ....shared.interfaces.write_request import WriteRequest from ....shared.patterns import KEYSEPARATOR, Collection, FullQualifiedId from ...action import Action from ...mixins.singular_action_mixin import SingularActionMixin from ...util.crypto import get_random_string from ...util.default_schema import DefaultSchema from ...util.register import register_action from ...util.typing import ActionData, ActionResultElement, ActionResults from ..motion.update import RECOMMENDATION_EXTENSION_REFERENCE_IDS_PATTERN from ..user.user_mixin import LimitOfUserMixin @register_action("meeting.import") class MeetingImport(SingularActionMixin, LimitOfUserMixin, Action): """ Action to import a meeting. """ model = Meeting() schema = DefaultSchema(Meeting()).get_default_schema( required_properties=["committee_id"], additional_required_fields={"meeting": {"type": "object"}}, title="Import meeting", description="Import a meeting into the committee.", ) def perform( self, action_data: ActionData, user_id: int, internal: bool = False ) -> Tuple[Optional[WriteRequest], Optional[ActionResults]]: """ Simplified entrypoint to perform the action. 
""" self.user_id = user_id self.index = 0 action_data = self.get_updated_instances(action_data) instance = next(iter(action_data)) self.validate_instance(instance) try: self.check_permissions(instance) except MissingPermission as e: msg = f"You are not allowed to perform action {self.name}." e.message = msg + " " + e.message raise e instance = self.base_update_instance(instance) self.write_requests.extend(self.create_write_requests(instance)) final_write_request = self.process_write_requests() result = [self.create_action_result_element(instance)] return (final_write_request, result) def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]: meeting_json = instance["meeting"] # checks if the meeting is correct if not len(meeting_json.get("meeting", {}).values()) == 1: raise ActionException("Need exact one meeting in meeting collection.") self.check_usernames_and_generate_new_ones(meeting_json) active_user_in_json = len( [ key for key in meeting_json.get("user", []) if meeting_json["user"][key]["is_active"] ] ) self.check_limit_of_user(active_user_in_json) # save blobs from mediafiles self.mediadata = [] for entry in meeting_json.get("mediafile", {}).values(): if "blob" in entry: self.mediadata.append( (entry.pop("blob"), entry["id"], entry["mimetype"]) ) # check datavalidation checker = Checker(data=meeting_json, mode="external") try: checker.run_check() except CheckException as ce: raise ActionException(str(ce)) self.allowed_collections = checker.allowed_collections for entry in meeting_json.get("motion", {}).values(): if entry.get("all_origin_ids") or entry.get("all_derived_motion_ids"): raise ActionException( "Motion all_origin_ids and all_derived_motion_ids should be empty." 
) organization_id = self.check_limit_of_meetings(instance["committee_id"]) self.update_meeting_and_users(instance, organization_id) # replace ids in the meeting_json self.create_replace_map(meeting_json) self.replace_fields(instance) self.update_admin_group(meeting_json) self.upload_mediadata() return instance def check_usernames_and_generate_new_ones(self, json_data: Dict[str, Any]) -> None: used_usernames = set() for entry in json_data.get("user", {}).values(): is_username_unique = False template_username = entry["username"] count = 1 while not is_username_unique: if entry["username"] in used_usernames: entry["username"] = template_username + " " + str(count) count += 1 continue result = self.datastore.filter( Collection("user"), FilterOperator("username", "=", entry["username"]), ["id"], ) if result: entry["username"] = template_username + " " + str(count) count += 1 continue is_username_unique = True used_usernames.add(entry["username"]) def check_limit_of_meetings(self, committee_id: int, text: str = "import") -> int: committee = self.datastore.get( FullQualifiedId(Collection("committee"), committee_id), ["organization_id"] ) organization_id = committee.get("organization_id", 0) organization = self.datastore.get( FullQualifiedId(Collection("organization"), organization_id), ["active_meeting_ids", "limit_of_meetings"], ) if ( limit_of_meetings := organization.get("limit_of_meetings", 0) ) and limit_of_meetings == len(organization.get("active_meeting_ids", [])): raise ActionException( f"You cannot {text} an active meeting, because you reached your limit of {limit_of_meetings} active meetings." 
) return organization_id def update_meeting_and_users( self, instance: Dict[str, Any], organization_id: int ) -> None: # update committee_id and is_active_in_organization_id json_data = instance["meeting"] self.get_meeting_from_json(json_data)["committee_id"] = instance["committee_id"] self.get_meeting_from_json(json_data)[ "is_active_in_organization_id" ] = organization_id # generate passwords for entry in json_data["user"].values(): entry["password"] = self.auth.hash(get_random_string(10)) # set enable_anonymous self.get_meeting_from_json(json_data)["enable_anonymous"] = False # set imported_at self.get_meeting_from_json(json_data)["imported_at"] = round(time.time()) def get_meeting_from_json(self, json_data: Any) -> Any:
def create_replace_map(self, json_data: Dict[str, Any]) -> None: replace_map: Dict[str, Dict[int, int]] = defaultdict(dict) for collection in json_data: if not json_data[collection]: continue new_ids = self.datastore.reserve_ids( Collection(collection), len(json_data[collection]) ) for entry, new_id in zip(json_data[collection].values(), new_ids): replace_map[collection][entry["id"]] = new_id self.replace_map = replace_map def replace_fields(self, instance: Dict[str, Any]) -> None: json_data = instance["meeting"] new_json_data = {} for collection in json_data: new_collection = {} for entry in json_data[collection].values(): for field in list(entry.keys()): self.replace_field_ids(collection, entry, field) new_collection[str(entry["id"])] = entry new_json_data[collection] = new_collection instance["meeting"] = new_json_data def replace_field_ids( self, collection: str, entry: Dict[str, Any], field: str, ) -> None: model_field = model_registry[Collection(collection)]().try_get_field(field) if model_field is None: raise ActionException(f"{collection}/{field} is not allowed.") if isinstance(model_field, BaseRelationField): if isinstance(model_field, BaseGenericRelationField): content_list = ( content if isinstance(content := entry.get(field), list) else [content] ) target_collections = [ item.split(KEYSEPARATOR)[0] for item in content_list if item ] else: target_collections = [k.collection for k in model_field.to.keys()] if all(c not in self.allowed_collections for c in target_collections): return if field == "id": entry["id"] = self.replace_map[collection][entry["id"]] elif ( collection == "meeting" and field == "user_ids" and "user" in self.allowed_collections ): entry[field] = [ self.replace_map["user"][id_] for id_ in entry.get(field) or [] ] elif collection == "user" and field == "meeting_ids": entry[field] = list(self.replace_map["meeting"].values()) elif collection == "motion" and field == "recommendation_extension": if entry[field]: fqids_str = 
RECOMMENDATION_EXTENSION_REFERENCE_IDS_PATTERN.findall( entry[field] ) entry_str = entry[field] entry_list = [] for fqid in fqids_str: search_str = "[" + fqid + "]" idx = entry_str.find(search_str) entry_list.append(entry_str[:idx]) col, id_ = fqid.split(KEYSEPARATOR) replace_str = ( "[" + col + KEYSEPARATOR + str(self.replace_map[col][int(id_)]) + "]" ) entry_list.append(replace_str) entry_str = entry_str[idx + len(replace_str) :] entry_list.append(entry_str) entry[field] = "".join(entry_list) else: if ( isinstance(model_field, BaseTemplateField) and model_field.is_template_field(field) and model_field.replacement_collection ): entry[field] = [ str( self.replace_map[model_field.replacement_collection.collection][ int(id_) ] ) for id_ in entry[field] ] elif ( isinstance(model_field, BaseTemplateField) and model_field.is_template_field(field) and not model_field.replacement_collection ): pass elif isinstance(model_field, RelationField): target_collection = model_field.get_target_collection().collection if entry[field]: entry[field] = self.replace_map[target_collection][entry[field]] elif isinstance(model_field, RelationListField): target_collection = model_field.get_target_collection().collection entry[field] = [ self.replace_map[target_collection][id_] for id_ in entry.get(field) or [] ] elif isinstance(model_field, GenericRelationField): if entry[field]: name, id_ = entry[field].split(KEYSEPARATOR) entry[field] = ( name + KEYSEPARATOR + str(self.replace_map[name][int(id_)]) ) elif isinstance(model_field, GenericRelationListField): new_fqid_list = [] for fqid in entry[field]: name, id_ = fqid.split(KEYSEPARATOR) new_fqid_list.append( name + KEYSEPARATOR + str(self.replace_map[name][int(id_)]) ) entry[field] = new_fqid_list if ( isinstance(model_field, BaseTemplateField) and model_field.replacement_collection and not model_field.is_template_field(field) ): replacement = model_field.get_replacement(field) id_ = int(replacement) new_id_ = self.replace_map[ 
model_field.replacement_collection.collection ][id_] new_field = model_field.get_structured_field_name(new_id_) tmp = entry[field] del entry[field] entry[new_field] = tmp def update_admin_group(self, data_json: Dict[str, Any]) -> None: admin_group_id = self.get_meeting_from_json(data_json)["admin_group_id"] for entry in data_json["group"].values(): if entry["id"] == admin_group_id: if entry["user_ids"]: entry["user_ids"].insert(0, self.user_id) else: entry["user_ids"] = [self.user_id] self.get_meeting_from_json(data_json)["user_ids"].insert(0, self.user_id) def upload_mediadata(self) -> None: for blob, id_, mimetype in self.mediadata: replaced_id = self.replace_map["mediafile"][id_] self.media.upload_mediafile(blob, replaced_id, mimetype) def create_write_requests(self, instance: Dict[str, Any]) -> Iterable[WriteRequest]: json_data = instance["meeting"] meeting_id = self.get_meeting_from_json(json_data)["id"] write_requests = [] for collection in json_data: for entry in json_data[collection].values(): fqid = FullQualifiedId(Collection(collection), entry["id"]) write_requests.append( self.build_write_request( EventType.Create, fqid, f"import meeting {meeting_id}", entry, ) ) # add meeting to committee/meeting_ids write_requests.append( self.build_write_request( EventType.Update, FullQualifiedId( Collection("committee"), self.get_meeting_from_json(json_data)["committee_id"], ), f"import meeting {meeting_id}", None, {"add": {"meeting_ids": [meeting_id]}, "remove": {}}, ) ) # add meeting to organization/active_meeting_ids if not archived if self.get_meeting_from_json(json_data).get("is_active_in_organization_id"): write_requests.append( self.build_write_request( EventType.Update, FullQualifiedId(Collection("organization"), 1), f"import meeting {meeting_id}", None, {"add": {"active_meeting_ids": [meeting_id]}, "remove": {}}, ) ) return write_requests def create_action_result_element( self, instance: Dict[str, Any] ) -> Optional[ActionResultElement]: """Returns the newly 
created id.""" return {"id": self.get_meeting_from_json(instance["meeting"])["id"]} def check_permissions(self, instance: Dict[str, Any]) -> None: if not has_committee_management_level( self.datastore, self.user_id, CommitteeManagementLevel.CAN_MANAGE, instance["committee_id"], ): raise MissingPermission(CommitteeManagementLevel.CAN_MANAGE)
""" Small helper to retrieve the one and only meeting object from the import data. """ key = next(iter(json_data["meeting"])) return json_data["meeting"][key]
api-response.js
import { LitElement, html, css } from 'lit-element'; import marked from 'marked'; import { unsafeHTML } from 'lit-html/directives/unsafe-html'; import { schemaInObjectNotation, generateExample } from '~/utils/schema-utils'; import FontStyles from '~/styles/font-styles'; import FlexStyles from '~/styles/flex-styles'; import TableStyles from '~/styles/table-styles'; import InputStyles from '~/styles/input-styles'; import TabStyles from '~/styles/tab-styles'; import BorderStyles from '~/styles/border-styles'; import CustomStyles from '~/styles/custom-styles'; import '~/components/schema-tree'; import '~/components/schema-table'; export default class
extends LitElement { constructor() { super(); this.selectedStatus = ''; this.headersForEachRespStatus = {}; this.mimeResponsesForEachStatus = {}; this.activeSchemaTab = 'schema'; } static get properties() { return { callback: { type: String }, responses: { type: Object }, parser: { type: Object }, schemaStyle: { type: String, attribute: 'schema-style' }, renderStyle: { type: String, attribute: 'render-style' }, selectedStatus: { type: String, attribute: 'selected-status' }, selectedMimeType: { type: String, attribute: 'selected-mime-type' }, activeSchemaTab: { type: String, attribute: 'active-schema-tab' }, schemaExpandLevel: { type: Number, attribute: 'schema-expand-level' }, schemaDescriptionExpanded: { type: String, attribute: 'schema-description-expanded' }, allowSchemaDescriptionExpandToggle: { type: String, attribute: 'allow-schema-description-expand-toggle' }, schemaHideWriteOnly: { type: String, attribute: 'schema-hide-write-only' }, }; } static get styles() { return [ FontStyles, FlexStyles, TabStyles, TableStyles, InputStyles, BorderStyles, css` .resp-head{ vertical-align: middle; padding:16px 0 8px; } .resp-head.divider{ border-top: 1px solid var(--border-color); margin-top:10px; } .resp-status{ font-weight:bold; font-size:calc(var(--font-size-small) + 1px); } .resp-descr{ font-size:calc(var(--font-size-small) + 1px); color:var(--light-fg); } .top-gap{margin-top:16px;} .example-panel{ font-size:var(--font-size-small); margin:0; } .focused-mode, .read-mode { padding-top:24px; margin-top:12px; border-top: 1px dashed var(--border-color); }`, CustomStyles, ]; } render() { return html` <div class="col regular-font response-panel ${this.renderStyle}-mode"> <div class=" ${this.callback === 'true' ? 'tiny-title' : 'req-res-title'} "> ${this.callback === 'true' ? 
'CALLBACK RESPONSE' : 'RESPONSE'} </div> <div> ${this.responseTemplate()} <div> </div> `; } resetSelection() { this.selectedStatus = ''; this.selectedMimeType = ''; } /* eslint-disable indent */ responseTemplate() { if (!this.responses) { return ''; } for (const statusCode in this.responses) { if (!this.selectedStatus) { this.selectedStatus = statusCode; } const allMimeResp = {}; for (const mimeResp in this.responses[statusCode]?.content) { const mimeRespObj = this.responses[statusCode].content[mimeResp]; if (!this.selectedMimeType) { this.selectedMimeType = mimeResp; } // Generate Schema const schemaTree = schemaInObjectNotation(mimeRespObj.schema, {}); // Generate Example const respExamples = generateExample( mimeRespObj.examples, mimeRespObj.example, mimeRespObj.schema, mimeResp, true, false, mimeResp.includes('json') ? 'json' : 'text', ); allMimeResp[mimeResp] = { description: this.responses[statusCode].description, examples: respExamples, selectedExample: respExamples[0]?.exampleId || '', schemaTree, }; } // Headers for each response status const tempHeaders = []; for (const key in this.responses[statusCode]?.headers) { tempHeaders.push({ name: key, ...this.responses[statusCode].headers[key] }); } this.headersForEachRespStatus[statusCode] = tempHeaders; this.mimeResponsesForEachStatus[statusCode] = allMimeResp; } return html` ${Object.keys(this.responses).length > 1 ? html`<div class='row' style='flex-wrap:wrap'> ${Object.keys(this.responses).map((respStatus) => html` ${respStatus === '$$ref' // Swagger-Client parser creates '$$ref' object if JSON references are used to create responses - this should be ignored ? '' : html` <button @click="${() => { this.selectedStatus = respStatus; if (this.responses[respStatus].content && Object.keys(this.responses[respStatus].content)[0]) { this.selectedMimeType = Object.keys(this.responses[respStatus].content)[0]; } else { this.selectedMimeType = undefined; } }}" class='m-btn small ${this.selectedStatus === respStatus ? 
'primary' : ''}' part="btn--resp ${this.selectedStatus === respStatus ? 'btn-fill--resp' : 'btn-outline--resp'} btn-response-status" style='margin: 8px 4px 0 0' > ${respStatus} </button>` }`) }` : html`<span>${Object.keys(this.responses)[0]}</span>` } </div> ${Object.keys(this.responses).map((status) => html` <div style = 'display: ${status === this.selectedStatus ? 'block' : 'none'}' > <div class="top-gap"> <span class="resp-descr m-markdown ">${unsafeHTML(marked(this.responses[status]?.description || ''))}</span> ${(this.headersForEachRespStatus[status] && this.headersForEachRespStatus[status]?.length > 0) ? html`${this.responseHeaderListTemplate(this.headersForEachRespStatus[status])}` : '' } </div> ${Object.keys(this.mimeResponsesForEachStatus[status]).length === 0 ? '' : html` <div class="tab-panel col"> <div class="tab-buttons row" @click="${(e) => { if (e.target.tagName.toLowerCase() === 'button') { this.activeSchemaTab = e.target.dataset.tab; } }}" > <button class="tab-btn ${this.activeSchemaTab !== 'example' ? 'active' : ''}" data-tab = 'schema' >SCHEMA</button> <button class="tab-btn ${this.activeSchemaTab === 'example' ? 'active' : ''}" data-tab = 'example'>EXAMPLE </button> <div style="flex:1"></div> ${Object.keys(this.mimeResponsesForEachStatus[status]).length === 1 ? html`<span class='small-font-size gray-text' style='align-self:center; margin-top:8px;'> ${Object.keys(this.mimeResponsesForEachStatus[status])[0]} </span>` : html`${this.mimeTypeDropdownTemplate(Object.keys(this.mimeResponsesForEachStatus[status]))}` } </div> ${this.activeSchemaTab === 'example' ? 
html`<div class ='tab-content col' style = 'flex:1;'> ${this.mimeExampleTemplate(this.mimeResponsesForEachStatus[status][this.selectedMimeType])} </div>` : html`<div class ='tab-content col' style = 'flex:1;'> ${this.mimeSchemaTemplate(this.mimeResponsesForEachStatus[status][this.selectedMimeType])} </div>` } </div> ` }`) } `; } responseHeaderListTemplate(respHeaders) { return html` <div style="padding:16px 0 8px 0" class="resp-headers small-font-size bold-text">RESPONSE HEADERS</div> <table style="border-collapse: collapse; margin-bottom:16px; border:1px solid var(--border-color); border-radius: var(--border-radius)" class="small-font-size mono-font"> ${respHeaders.map((v) => html` <tr> <td style="padding:8px; vertical-align: baseline; min-width:120px; border-top: 1px solid var(--light-border-color); text-overflow: ellipsis;"> ${v.name || ''} </td> <td style="padding:4px; vertical-align: baseline; padding:0 5px; border-top: 1px solid var(--light-border-color); text-overflow: ellipsis;"> ${v.schema.type || ''} </td> <td style="padding:8px; vertical-align: baseline; border-top: 1px solid var(--light-border-color);text-overflow: ellipsis;"> <div class="m-markdown-small regular-font" >${unsafeHTML(marked(v.description || ''))}</div> </td> <td style="padding:8px; vertical-align: baseline; border-top: 1px solid var(--light-border-color); text-overflow: ellipsis;"> ${v.schema.example || ''} </td> </tr> `)} </table>`; } mimeTypeDropdownTemplate(mimeTypes) { return html` <select @change="${(e) => { this.selectedMimeType = e.target.value; }}" style='margin-bottom: -1px; z-index:1'> ${mimeTypes.map((mimeType) => html`<option value='${mimeType}' ?selected = '${mimeType === this.selectedMimeType}'> ${mimeType} </option>`)} </select>`; } onSelectExample(e) { const exampleContainerEl = e.target.closest('.example-panel'); const exampleEls = [...exampleContainerEl.querySelectorAll('.example')]; exampleEls.forEach((v) => { v.style.display = v.dataset.example === e.target.value ? 
'block' : 'none'; }); } mimeExampleTemplate(mimeRespDetails) { if (!mimeRespDetails) { return html` <pre style='color:var(--red)' class = '${this.renderStyle === 'read' ? 'read example-panel border pad-8-16' : 'example-panel border-top'}'> No example provided </pre> `; } return html` ${mimeRespDetails.examples.length === 1 ? html` ${mimeRespDetails.examples[0].exampleFormat === 'json' ? html` <json-tree render-style = '${this.renderStyle}' .data="${mimeRespDetails.examples[0].exampleValue}" class = 'example-panel ${this.renderStyle === 'read' ? 'border pad-8-16' : 'border-top pad-top-8'}' ></json-tree>` : html` <pre class = 'example-panel ${this.renderStyle === 'read' ? 'border pad-8-16' : 'border-top pad-top-8'}'>${mimeRespDetails.examples[0].exampleValue}</pre> ` }` : html` <span class = 'example-panel ${this.renderStyle === 'read' ? 'border pad-8-16' : 'border-top pad-top-8'}'> <select style="min-width:100px; max-width:100%" @change='${(e) => this.onSelectExample(e)}'> ${mimeRespDetails.examples.map((v) => html`<option value="${v.exampleId}" ?selected=${v.exampleId === mimeRespDetails.selectedExample} > ${v.exampleSummary.length > 80 ? v.exampleId : v.exampleSummary} </option>`)} </select> ${mimeRespDetails.examples.map((v) => html` <div class="example" data-example = '${v.exampleId}' style = "display: ${v.exampleId === mimeRespDetails.selectedExample ? 'block' : 'none'}"> ${v.exampleSummary && v.exampleSummary.length > 80 ? html`<div style="padding: 4px 0"> ${v.exampleSummary} </div>` : ''} ${v.exampleDescription ? html`<div class="m-markdown-small" style="padding: 4px 0"> ${unsafeHTML(marked(v.exampleDescription || ''))} </div>` : ''} ${v.exampleFormat === 'json' ? 
html` <json-tree render-style = '${this.renderStyle}' .data = '${v.exampleValue}' ></json-tree>` : html`<pre>${v.exampleValue}</pre>` } </div> `)} </span> ` } `; } mimeSchemaTemplate(mimeRespDetails) { if (!mimeRespDetails) { return html` <pre style='color:var(--red)' class = '${this.renderStyle === 'read' ? 'border pad-8-16' : 'border-top'}'> Schema not found</pre> `; } return html` ${this.schemaStyle === 'table' ? html` <schema-table render-style = '${this.renderStyle}' .data = '${mimeRespDetails.schemaTree}' schema-expand-level = "${this.schemaExpandLevel}" schema-description-expanded = "${this.schemaDescriptionExpanded}" allow-schema-description-expand-toggle = "${this.allowSchemaDescriptionExpandToggle}", schema-hide-read-only = false schema-hide-write-only = ${this.schemaHideWriteOnly} > </schema-tree> ` : html` <schema-tree render-style = '${this.renderStyle}' .data = '${mimeRespDetails.schemaTree}' schema-expand-level = "${this.schemaExpandLevel}" schema-description-expanded = "${this.schemaDescriptionExpanded}" allow-schema-description-expand-toggle = "${this.allowSchemaDescriptionExpandToggle}", schema-hide-read-only = false schema-hide-write-only = ${this.schemaHideWriteOnly} > </schema-tree>` }`; } /* eslint-enable indent */ } // Register the element with the browser customElements.define('api-response', ApiResponse);
ApiResponse
mail.module.ts
import { NgModule } from '@angular/core'; import { RouterModule, Routes } from '@angular/router'; import { MatButtonModule, MatCheckboxModule, MatDialogModule, MatFormFieldModule, MatIconModule, MatInputModule, MatMenuModule, MatRippleModule, MatSelectModule, MatToolbarModule } from '@angular/material'; import { TranslateModule } from '@ngx-translate/core'; import { FuseSharedModule } from '@fuse/shared.module'; import { FuseSidebarModule } from '@fuse/components'; import * as fromGuards from 'app/main/apps/mail-ngrx/store/guards/index'; import { MailNgrxStoreModule } from 'app/main/apps/mail-ngrx/store/store.module'; import { MailNgrxComponent } from 'app/main/apps/mail-ngrx/mail.component'; import { MailNgrxListComponent } from 'app/main/apps/mail-ngrx/mail-list/mail-list.component'; import { MailNgrxListItemComponent } from 'app/main/apps/mail-ngrx/mail-list/mail-list-item/mail-list-item.component'; import { MailNgrxDetailsComponent } from 'app/main/apps/mail-ngrx/mail-details/mail-details.component'; import { MailNgrxMainSidebarComponent } from 'app/main/apps/mail-ngrx/sidebars/main/main-sidebar.component'; import { MailNgrxComposeDialogComponent } from 'app/main/apps/mail-ngrx/dialogs/compose/compose.component';
{ path : 'label/:labelHandle', component : MailNgrxComponent, canActivate: [fromGuards.ResolveGuard] }, { path : 'label/:labelHandle/:mailId', component : MailNgrxComponent, canActivate: [fromGuards.ResolveGuard] }, { path : 'filter/:filterHandle', component : MailNgrxComponent, canActivate: [fromGuards.ResolveGuard] }, { path : 'filter/:filterHandle/:mailId', component : MailNgrxComponent, canActivate: [fromGuards.ResolveGuard] }, { path : ':folderHandle', component : MailNgrxComponent, canActivate: [fromGuards.ResolveGuard] }, { path : ':folderHandle/:mailId', component : MailNgrxComponent, canActivate: [fromGuards.ResolveGuard] }, { path : '**', redirectTo: 'inbox' } ]; @NgModule({ declarations : [ MailNgrxComponent, MailNgrxListComponent, MailNgrxListItemComponent, MailNgrxDetailsComponent, MailNgrxMainSidebarComponent, MailNgrxComposeDialogComponent ], imports : [ RouterModule.forChild(routes), MatButtonModule, MatCheckboxModule, MatDialogModule, MatFormFieldModule, MatIconModule, MatInputModule, MatMenuModule, MatRippleModule, MatSelectModule, MatToolbarModule, TranslateModule, FuseSharedModule, FuseSidebarModule, MailNgrxStoreModule ], providers : [ MailNgrxService, fromGuards.ResolveGuard ], entryComponents: [MailNgrxComposeDialogComponent] }) export class MailNgrxModule { }
import { MailNgrxService } from 'app/main/apps/mail-ngrx/mail.service'; const routes: Routes = [
tests.rs
// Tests for the swc minifier's `negate_cost` heuristic: each case parses a JS
// expression, negates it with `compress::util::negate`, and asserts that the
// predicted size delta matches the observed one.
// NOTE(review): this chunk's physical lines are out of order — the argument
// list on the final line below belongs inside the `parse_file_as_expr(` call
// that ends the first line; confirm against the original file before building.
use swc_common::{util::take::Take, FileName, Mark, SyntaxContext}; use swc_ecma_ast::*; use swc_ecma_parser::parse_file_as_expr; use swc_ecma_transforms_base::fixer::fixer; use swc_ecma_utils::ExprCtx; use swc_ecma_visit::{noop_visit_mut_type, FoldWith, VisitMut, VisitMutWith}; use tracing::{info, warn}; use super::negate_cost; use crate::{compress::util::negate, debug::dump}; struct UnwrapParen; impl VisitMut for UnwrapParen { noop_visit_mut_type!(); fn visit_mut_expr(&mut self, e: &mut Expr) { e.visit_mut_children_with(self); if let Expr::Paren(p) = e { *e = *p.expr.take(); } } } fn assert_negate_cost(s: &str, in_bool_ctx: bool, is_ret_val_ignored: bool, expected: isize) { testing::run_test2(false, |cm, handler| { let fm = cm.new_source_file(FileName::Anon, s.to_string()); let mut e = parse_file_as_expr(
// NOTE(review): `&mut vec![], )` below closes the `parse_file_as_expr` call
// opened above; its leading arguments appear displaced at the end of the chunk.
&mut vec![], ) .map_err(|e| { e.into_diagnostic(&handler).emit(); })?; e.visit_mut_with(&mut UnwrapParen); let input = { let e = e.clone(); let e = e.fold_with(&mut fixer(None)); dump(&e, true) }; let expr_ctx = ExprCtx { unresolved_ctxt: SyntaxContext::empty().apply_mark(Mark::new()), is_unresolved_ref_safe: false, }; let real = { let mut real = e.clone(); negate(&expr_ctx, &mut real, in_bool_ctx, is_ret_val_ignored); let real = real.fold_with(&mut fixer(None)); dump(&real, true) }; { warn!( "Actual: {} ;Input = {}, Real = {}", real.len() as isize - input.len() as isize, input.len(), real.len() ); info!("Real: {}", real); info!("Input: {}", input); } let actual = negate_cost(&expr_ctx, &e, in_bool_ctx, is_ret_val_ignored); assert_eq!( actual, expected, "Expected negation cost of {} to be {}, but got {}", s, expected, actual, ); Ok(()) }) .unwrap(); } #[test] fn negate_cost_1() { assert_negate_cost( "this[key] && !this.hasOwnProperty(key) || (this[key] = value)", false, true, 2, ); } #[test] #[ignore] fn negate_cost_2() { assert_negate_cost( "(!this[key] || this.hasOwnProperty(key)) && (this[key] = value)", false, true, -2, ); } #[test] fn negate_cost_3() { assert_negate_cost( "(pvt || (delete cache[id].data, isEmptyDataObject(cache[id]))) && (isNode ? \
jQuery.cleanData([ elem ], !0) : jQuery.support.deleteExpando || cache != cache.window ? 
delete cache[id] : \ cache[id] = null)", true, true, 4, ); } #[test] #[ignore] fn negate_cost_4() { // "(!force && !this._isRebuildRequired()) && !self._buildList()", assert_negate_cost( "!(force || this._isRebuildRequired()) || self._buildList()", true, true, 2, ); } #[test] fn negate_cost_5() { assert_negate_cost( "!(capacity && codeResult1 && (codeResult2 = codeResult1, !((list = config.blacklist) && \
list.some(function(item) { return Object.keys(item).every(function(key) { return item[key] === codeResult2[key]; }); }))) && (codeResult3 = codeResult1, \"function\" != typeof (filter = config.filter) || \
filter(codeResult3))) || (capacity--, result.codeResult = codeResult, capture && \
(canvas.width = imageSize.x, canvas.height = imageSize.y, image_debug.a.drawImage(data, \
imageSize, ctx), result.frame = canvas.toDataURL()), results.push(result))", true, true, -1, ); } #[test] fn negate_cost_5_1() { assert_negate_cost( "!(capacity && codeResult1 && (codeResult2 = codeResult1, !((list = config.blacklist) && \
list.some(function(item) { return Object.keys(item).every(function(key) { return item[key] === codeResult2[key]; }); }))) && (codeResult3 = codeResult1, \"function\" != typeof (filter = config.filter) || \
filter(codeResult3)))", true, false, -1, ); } #[test] fn negate_cost_5_2() { assert_negate_cost( "!(capacity && codeResult1 && (codeResult2 = codeResult1, !((list = config.blacklist) && \
list.some(function(item) { return Object.keys(item).every(function(key) { return item[key] === codeResult2[key]; }); }))))", true, false, -1, ); } #[test] fn negate_cost_5_3() { assert_negate_cost( "!(codeResult2 = codeResult1, !((list = config.blacklist) && list.some(function(item) { return Object.keys(item).every(function(key) { return item[key] === codeResult2[key]; }); })))", true, false, -1, ); } #[test] fn negate_cost_6() { assert_negate_cost( " capacity && codeResult1 && (codeResult2 = codeResult1, !((list = config.blacklist) && \
list.some(function(item) { return 
Object.keys(item).every(function(key) { return item[key] === codeResult2[key]; }); }))) && (codeResult3 = codeResult1, \"function\" != typeof (filter = config.filter) || \
filter(codeResult3)) && (capacity--, result.codeResult = codeResult, capture && \
(canvas.width = imageSize.x, canvas.height = imageSize.y, image_debug.a.drawImage(data, \
imageSize, ctx), result.frame = canvas.toDataURL()), results.push(result))", true, true, 4, ); } #[test] fn negate_cost_6_1() { assert_negate_cost( " capacity && codeResult1 && (codeResult2 = codeResult1, !((list = config.blacklist) && \
list.some(function(item) { return Object.keys(item).every(function(key) { return item[key] === codeResult2[key]; }); }))) && (codeResult3 = codeResult1, \"function\" != typeof (filter = config.filter) || \
filter(codeResult3))", true, false, 4, ); } #[test] fn negate_cost_6_2() { assert_negate_cost( " !((list = config.blacklist) && list.some(function(item) { return Object.keys(item).every(function(key) { return item[key] === codeResult2[key]; }); }))", true, false, -1, ); } #[test] #[ignore] fn next_31077_1() { assert_negate_cost( "((!a || !(a instanceof TextViewDesc1) || /\\n$/.test(a.node.text)) && ((result1.safari \
|| result1.chrome) && a && 'false' == a.dom.contentEditable && this.addHackNode('IMG'), \
this.addHackNode('BR')))", true, true, 0, ); } #[test] #[ignore] fn next_31077_2() { assert_negate_cost( "!((!a || !(a instanceof TextViewDesc1) || /\\n$/.test(a.node.text)) || ((result1.safari \
|| result1.chrome) && a && 'false' == a.dom.contentEditable && this.addHackNode('IMG'), \
this.addHackNode('BR')))", true, true, -3, ); }
// NOTE(review): displaced leading arguments of `parse_file_as_expr` — this
// line should precede the `&mut vec![], )` line near the top of the chunk.
&fm, Default::default(), swc_ecma_ast::EsVersion::latest(), None,
broker.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = 'phil.zhang'

from abc import ABCMeta, abstractmethod, abstractproperty

import six

from algotrade.event_engine import EventEngineMixin


######################################################################
# Base broker class

class BaseBroker(six.with_metaclass(ABCMeta), EventEngineMixin):
    """Base class for brokers.

    Defines the abstract order-management interface (cash/position queries,
    order creation, submission and cancellation) that concrete brokers
    implement.

    .. note::

        This is a base class and should not be used directly.
    """

    def __init__(self):
        # Shared event engine supplied by the EventEngineMixin.
        self.event_engine = EventEngineMixin.event_engine

    @property
    def order_updated_event(self):
        # Handlers should expect 2 parameters:
        #   1: broker instance
        #   2: OrderEvent instance
        # NOTE(review): ``self.__order_event`` is never assigned anywhere in
        # this class (the notify machinery was commented out upstream), and
        # name mangling makes it hard for subclasses to set — confirm before
        # relying on this property.
        return self.__order_event

    @abstractmethod
    def instrument_traits(self, instrument):
        """Returns the traits for an instrument."""
        raise NotImplementedError()

    @abstractmethod
    def get_cash(self, include_short=True):
        """Returns the available cash.

        :param include_short: Include cash from short positions.
        :type include_short: boolean.
        """
        raise NotImplementedError()

    @abstractmethod
    def shares(self, instrument):
        """Returns the number of shares for an instrument."""
        raise NotImplementedError()

    @abstractproperty
    def positions(self):
        """Returns a dictionary that maps instruments to shares."""
        raise NotImplementedError()

    @abstractmethod
    def active_orders(self, instrument=None):
        """Returns a sequence with the orders that are still active.

        :param instrument: An optional instrument identifier to return only the
            active orders for the given instrument.
        :type instrument: string.
        """
        raise NotImplementedError()

    @abstractmethod
    def submit_order(self, order):
        """Submits an order.

        :param order: The order to submit.
        :type order: :class:`BaseOrder`.

        .. note::
            * After this call the order is in SUBMITTED state and an event is
              not triggered for this transition.
            * Calling this twice on the same order will raise an exception.
        """
        raise NotImplementedError()

    @abstractmethod
    def create_market_order(self, action, instrument, quantity, on_close=False):
        """Creates a Market order.

        A market order is an order to buy or sell a stock at the best available
        price. Generally, this type of order will be executed immediately.
        However, the price at which a market order will be executed is not
        guaranteed.

        :param action: The order action.
        :type action: BaseOrder.Action.BUY, or BaseOrder.Action.BUY_TO_COVER,
            or BaseOrder.Action.SELL or BaseOrder.Action.SELL_SHORT.
        :param instrument: Instrument identifier.
        :type instrument: string.
        :param quantity: BaseOrder quantity.
        :type quantity: int/float.
        :param on_close: True if the order should be filled as close to the
            closing price as possible (Market-On-Close order). Default is
            False.
        :type on_close: boolean.
        :rtype: A :class:`MarketOrder` subclass.
        """
        raise NotImplementedError()

    @abstractmethod
    def create_limit_order(self, action, instrument, limit_price, quantity):
        """Creates a Limit order.

        A limit order is an order to buy or sell a stock at a specific price or
        better. A buy limit order can only be executed at the limit price or
        lower, and a sell limit order can only be executed at the limit price
        or higher.

        :param action: The order action.
        :type action: BaseOrder.Action.BUY, or BaseOrder.Action.BUY_TO_COVER,
            or BaseOrder.Action.SELL or BaseOrder.Action.SELL_SHORT.
        :param instrument: Instrument identifier.
        :type instrument: string.
        :param limit_price: The order price.
        :type limit_price: float
        :param quantity: BaseOrder quantity.
        :type quantity: int/float.
        :rtype: A :class:`LimitOrder` subclass.
        """
        raise NotImplementedError()

    @abstractmethod
    def create_stop_order(self, action, instrument, stop_price, quantity):
        """Creates a Stop order.

        A stop order, also referred to as a stop-loss order, is an order to buy
        or sell a stock once the price of the stock reaches a specified price,
        known as the stop price. When the stop price is reached, a stop order
        becomes a market order. A buy stop order is entered at a stop price
        above the current market price. Investors generally use a buy stop
        order to limit a loss or to protect a profit on a stock that they have
        sold short. A sell stop order is entered at a stop price below the
        current market price. Investors generally use a sell stop order to
        limit a loss or to protect a profit on a stock that they own.

        :param action: The order action.
        :type action: BaseOrder.Action.BUY, or BaseOrder.Action.BUY_TO_COVER,
            or BaseOrder.Action.SELL or BaseOrder.Action.SELL_SHORT.
        :param instrument: Instrument identifier.
        :type instrument: string.
        :param stop_price: The trigger price.
        :type stop_price: float
        :param quantity: BaseOrder quantity.
        :type quantity: int/float.
        :rtype: A :class:`StopOrder` subclass.
        """
        raise NotImplementedError()

    @abstractmethod
    def create_stop_limit_order(self, action, instrument, stop_price, limit_price, quantity):
        """Creates a Stop-Limit order.

        A stop-limit order is an order to buy or sell a stock that combines the
        features of a stop order and a limit order. Once the stop price is
        reached, a stop-limit order becomes a limit order that will be executed
        at a specified price (or better). The benefit of a stop-limit order is
        that the investor can control the price at which the order can be
        executed.

        :param action: The order action.
        :type action: BaseOrder.Action.BUY, or BaseOrder.Action.BUY_TO_COVER,
            or BaseOrder.Action.SELL or BaseOrder.Action.SELL_SHORT.
        :param instrument: Instrument identifier.
        :type instrument: string.
        :param stop_price: The trigger price.
        :type stop_price: float
        :param limit_price: The price for the limit order.
        :type limit_price: float
        :param quantity: BaseOrder quantity.
        :type quantity: int/float.
        :rtype: A :class:`StopLimitOrder` subclass.
        """
        raise NotImplementedError()

    @abstractmethod
    def cancel_order(self, order):
        """Requests an order to be canceled. If the order is filled an
        Exception is raised.

        :param order: The order to cancel.
        :type order: :class:`BaseOrder`.
        """
        raise NotImplementedError()
mips_unknown_linux_gnu.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use target::{Target, TargetOptions}; pub fn target() -> Target { Target { llvm_target: "mips-unknown-linux-gnu".to_string(), target_endian: "big".to_string(), target_pointer_width: "32".to_string(), data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), target_os: "linux".to_string(), target_env: "gnu".to_string(), target_vendor: "unknown".to_string(),
cpu: "mips32r2".to_string(), features: "+mips32r2,+soft-float".to_string(), max_atomic_width: 32, ..super::linux_base::opts() }, } }
options: TargetOptions {
test_auto_SurfaceSnapshots.py
# Auto-generated nipype trait-metadata tests for the FreeSurfer
# SurfaceSnapshots interface: each yielded assert compares one trait's metadata
# against the expected map.
# NOTE(review): this chunk's segments are out of order — the orphaned
# `tcl_script` entry on the last line belongs inside `input_map`, between the
# `surface` and `terminal_output` entries. Confirm against the original file.
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.freesurfer.utils import SurfaceSnapshots def test_SurfaceSnapshots_inputs(): input_map = dict(annot_file=dict(argstr='-annotation %s', xor=['annot_name'], ), annot_name=dict(argstr='-annotation %s', xor=['annot_file'], ), args=dict(argstr='%s', ), colortable=dict(argstr='-colortable %s', ), demean_overlay=dict(argstr='-zm', ), environ=dict(nohash=True, usedefault=True, ), hemi=dict(argstr='%s', mandatory=True, position=2, ), identity_reg=dict(argstr='-overlay-reg-identity', xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), ignore_exception=dict(nohash=True, usedefault=True, ), invert_overlay=dict(argstr='-invphaseflag 1', ), label_file=dict(argstr='-label %s', xor=['label_name'], ), label_name=dict(argstr='-label %s', xor=['label_file'], ), label_outline=dict(argstr='-label-outline', ), label_under=dict(argstr='-labels-under', ), mni152_reg=dict(argstr='-mni152reg', xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), orig_suffix=dict(argstr='-orig %s', ), overlay=dict(argstr='-overlay %s', requires=['overlay_range'], ), overlay_range=dict(argstr='%s', ), overlay_range_offset=dict(argstr='-foffset %.3f', ), overlay_reg=dict(argstr='-overlay-reg %s', xor=['overlay_reg', 'identity_reg', 'mni152_reg'], ), patch_file=dict(argstr='-patch %s', ), reverse_overlay=dict(argstr='-revphaseflag 1', ), screenshot_stem=dict(), show_color_scale=dict(argstr='-colscalebarflag 1', ), show_color_text=dict(argstr='-colscaletext 1', ), show_curv=dict(argstr='-curv', xor=['show_gray_curv'], ), show_gray_curv=dict(argstr='-gray', xor=['show_curv'], ), six_images=dict(), sphere_suffix=dict(argstr='-sphere %s', ), stem_template_args=dict(requires=['screenshot_stem'], ), subject_id=dict(argstr='%s', mandatory=True, position=1, ), subjects_dir=dict(), surface=dict(argstr='%s', mandatory=True, position=3, ),
terminal_output=dict(mandatory=True, nohash=True, ), truncate_overlay=dict(argstr='-truncphaseflag 1', ), ) inputs = SurfaceSnapshots.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SurfaceSnapshots_outputs(): output_map = dict(snapshots=dict(), ) outputs = SurfaceSnapshots.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value
# NOTE(review): orphaned input_map entry — see the ordering note above.
tcl_script=dict(argstr='%s', genfile=True, ),
http2.test.py
'''
Verify basic HTTP/2 functionality.
'''

# @file
#
# Copyright 2020, Verizon Media
# SPDX-License-Identifier: Apache-2.0
#

Test.Summary = '''
Verify basic HTTP/2 functionality.
'''

#
# Test 1: Verify correct behavior of a single HTTP/2 transaction.
#
r = Test.AddTestRun("Verify HTTP/2 processing of a single HTTP transaction")

# Client speaks HTTP/2 over TLS to the proxy; server listens for HTTPS on 4444.
client = r.AddClientProcess("client1", "replay_files/single_transaction",
                            https_ports=[4443], other_args="--verbose diag")
server = r.AddServerProcess("server1", "replay_files/single_transaction",
                            https_ports=[4444], other_args="--verbose diag")
# Proxy terminates TLS on 4443 and downgrades HTTP/2 to HTTP/1 toward 4444.
proxy = r.AddProxyProcess("proxy1", listen_port=4443, server_port=4444,
                          use_ssl=True, use_http2_to_1=True)

# macOS produces slightly different output, so it gets its own gold files.
if Condition.IsPlatform("darwin"):
    proxy.Streams.stdout = "gold/single_transaction_proxy.gold_macos"
    client.Streams.stdout = "gold/single_transaction_client.gold_macos"
    server.Streams.stdout = "gold/single_transaction_server.gold_macos"
else:
    proxy.Streams.stdout = "gold/single_transaction_proxy.gold"
    client.Streams.stdout = "gold/single_transaction_client.gold"
    server.Streams.stdout = "gold/single_transaction_server.gold"

# Neither side should report any replay verification violations.
client.Streams.stdout += Testers.ExcludesExpression(
    "Violation:",
    "There should be no verification errors because there are none added.")
server.Streams.stdout += Testers.ExcludesExpression(
    "Violation:",
    "There should be no verification errors because there are none added.")
portal.component.ts
import { Component } from '@angular/core';
import Util from '../common/util';

// jQuery global provided at runtime by the host page.
declare var $: any;

/**
 * Portal shell component; on init it delegates layout adjustment to the
 * shared Util helper.
 */
@Component({
    selector: 'portal',
    templateUrl: './portal.component.html',
    styles: ['']
})
export class PortalComponent {

    constructor() {
    }

    ngOnInit() {
        Util.adjustLayout();
    }
}
path_test.go
/* Sniperkit-Bot - Status: analyzed */ // Copyright 2015-2017, Cyrill @ Schumacher.fm and the CoreStore contributors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package net_test import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/sniperkit/snk.fork.corestoreio-pkg/net" ) func TestShiftPath(t *testing.T)
var benchmarkShiftPath string // BenchmarkShiftPath-4 10000000 217 ns/op 96 B/op 3 allocs/op func BenchmarkShiftPath(b *testing.B) { for i := 0; i < b.N; i++ { benchmarkShiftPath, _ = net.ShiftPath("/catalog/product/view/id/123") } }
{ tests := []struct{ have, head, tail string }{ {"/", "", "/"}, {"/contact", "contact", "/"}, {"/contact/post", "contact", "/post"}, {"./contact/post", "contact", "/post"}, {"../contact/post", "contact", "/post"}, {"../../contact/post", "contact", "/post"}, {".../../contact/post", "contact", "/post"}, {"contact/post", "contact", "/post"}, {"/catalog/product/view/id/123", "catalog", "/product/view/id/123"}, } for i, test := range tests { hh, ht := net.ShiftPath(test.have) assert.Exactly(t, test.head, hh, "Head Index %d", i) require.Exactly(t, test.tail, ht, "Tail Index %d", i) } }
themes.ts
// Theme definitions for the component library: responsive breakpoints, primary
// and secondary button variants, flyout / metro-tile / accordion styling, and
// the complete `light` theme object, all built from the shared color palette
// and distance/remCalc helpers. (Content is collapsed onto long physical lines.)
import * as colors from './colors'; import { Breakpoints, PreciseFullTheme, ButtonThemeSettings, FlyoutStyling, MetroInfoTileStyling, AccordionCardStyling, } from './common'; import { distance } from './distance'; import { remCalc } from './utils/remCalc'; const colorCycle = [ colors.indigo, colors.green, colors.orangeNeon, colors.brightLemon, colors.cyan, colors.grey1, colors.orange, colors.azur, colors.purpleRed, colors.greenNeon, ]; export const breakpoints: Breakpoints = { medium: 740, // tablet large: 980, // desktop xLarge: 1200, // hd desktop max: 1800, // full hd desktop }; export const buttonPrimary: ButtonThemeSettings = { background: colors.cyan, hoverBackground: colors.ocean, focusBackground: colors.midnight, disabledBackground: colors.pinkSwan, text: colors.white, hoverText: colors.white, focusText: colors.white,
disabledText: colors.white, border: 'none', hoverBorder: 'none', focusBorder: 'none', disabledBorder: 'none', lineHeightMedium: '22px', lineHeightSmall: '18px', }; export const buttonSecondary: ButtonThemeSettings = { background: colors.white, hoverBackground: colors.white, focusBackground: colors.white, disabledBackground: colors.white, text: colors.cyan, hoverText: colors.ocean, focusText: colors.midnight, disabledText: colors.pinkSwan, border: `1px solid ${colors.cyan}`, hoverBorder: `1px solid ${colors.ocean}`, focusBorder: `1px solid ${colors.midnight}`, disabledBorder: `1px solid ${colors.pinkSwan}`, lineHeightMedium: '20px', lineHeightSmall: '16px', }; export const flyout: FlyoutStyling = { maxHeight: '600px', maxWidth: '300px', background: colors.white, textColor: colors.black, fontSize: remCalc('16px'), }; export const metroInfoTile: MetroInfoTileStyling = { textColor: colors.white, background: colors.azur, size: '150px', }; export const accordionCard: AccordionCardStyling = { borderColor: colors.lighterGray, openedBorderColor: colors.tuna, openedHeaderBorderColor: colors.tuna, openedHeaderBackground: colors.whiterSmoke, headerBackground: colors.white, headerPadding: distance.medium, }; export const light: PreciseFullTheme = { flyout, metroInfoTile, accordionCard, ui0: colors.cyan, ui1: colors.white, ui2: colors.whiterSmoke, ui3: colors.whiteSmoke, ui4: colors.lighterGray, ui5: colors.tuna, ui6: colors.ocean, ui7: colors.midnight, ui8: colors.skyBlue, text0: colors.cyan, text1: colors.eclipse, text2: colors.charcoal, text3: colors.pinkSwan, text4: colors.white, text5: colors.darkGray, text6: colors.black, text7: colors.white, buttonPrimary, buttonSecondary, buttonIconPosition: 'right', primary: colors.pacificBlue, secondary: colors.lighterGray, textDisabled: colors.pinkSwan, inputDisabled: colors.whiterSmoke, inputError: colors.purpleRed, colorCycle, headingsPadding: '0 0 0.5rem 0', tagBackground: colors.darkGray, tagColor: colors.black, 
toggleHeadBackground: colors.brightBlue, toggleHeadActiveBackground: colors.azur, breakpoints, accordionLine: `1px solid ${colors.lighterGray}`, accordionPadding: `${distance.medium}`, accordionContentPadding: `${distance.small} ${distance.medium} ${distance.xlarge} ${remCalc('50px')}`, fontFamily: 'inherit', tableBorder: `1px solid ${colors.lighterGray}`, tableLayout: 'auto', tableHeadPadding: `${distance.medium} ${distance.large}`, badgeColor: 'orange', badgeBackground: 'white', notificationColorNone: colors.tuna, notificationColorSuccess: colors.lightGreen, notificationColorInfo: colors.cyan, notificationColorWarning: colors.brightLemon, notificationColorError: colors.purpleRed, notificationPadding: `${distance.small} ${distance.medium}`, notificationBorderWidth: `1px 1px 1px 5px`, notificationBoxShadow: `0 4px 8px 0 rgba(0, 0, 0, 0.1)`, notificationTitleFontSize: remCalc('16px'), notificationTitleLineHeight: `22px`, notificationTextFontSize: remCalc('16px'), notificationTextLineHeight: `22px`, notificationIconMarginRight: distance.medium, highlightColor: colors.brightLemon, };
instance.rs
// Test harness for spawning and driving a bitcoind-style node (XEC / BCH /
// XPI). `BitcoindConf` picks two free ports and resolves the daemon/CLI binary
// paths for the chosen chain; `BitcoindInstance` writes a config file, spawns
// the daemon with stdout/stderr redirected into the instance directory,
// exposes CLI and JSON-RPC handles, supports restart with extra args, polls
// `getblockcount` until the node is ready, and kills the child process on
// drop. (Content is collapsed onto long physical lines.)
use std::{ ffi::OsString, fs::File, io::Write, path::{Path, PathBuf}, process::{Child, Command, Output}, str::FromStr, time::Duration, }; use bitcoinsuite_core::Net; use bitcoinsuite_error::{Result, WrapErr}; use bitcoinsuite_test_utils::pick_ports; use tempdir::TempDir; use crate::{ cli::BitcoinCli, rpc_client::{BitcoindRpcClient, BitcoindRpcClientConf}, BitcoindError, }; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BitcoindChain { XEC, BCH, XPI, } #[derive(Debug, Clone)] pub struct BitcoindConf { pub bitcoind_path: PathBuf, pub bitcoincli_path: PathBuf, pub additional_args: Vec<OsString>, pub p2p_port: u16, pub rpc_port: u16, pub net: Net, pub chain: BitcoindChain, } #[derive(Debug)] pub struct BitcoindInstance { conf: BitcoindConf, instance_dir: PathBuf, datadir_arg: OsString, bitcoind_child: Child, cli: BitcoinCli, client: BitcoindRpcClient, } impl BitcoindConf { pub fn from_chain_regtest( bin_folder: impl AsRef<Path>, chain: BitcoindChain, additional_args: Vec<OsString>, ) -> Result<Self> { Self::new(bin_folder, chain, Net::Regtest, additional_args) } pub fn new( bin_folder: impl AsRef<Path>, chain: BitcoindChain, net: Net, additional_args: Vec<OsString>, ) -> Result<Self> { let ports = pick_ports(2)?; let bin_folder = bin_folder.as_ref(); let bin_folder = match chain { BitcoindChain::XEC => bin_folder.join("bitcoin-abc").join("bin"), BitcoindChain::BCH => bin_folder.join("bitcoin-cash-node").join("bin"), BitcoindChain::XPI => bin_folder.join("lotusd").join("bin"), }; let (bitcoind_path, bitcoincli_path) = match chain { BitcoindChain::XPI => (bin_folder.join("lotusd"), bin_folder.join("lotus-cli")), _ => (bin_folder.join("bitcoind"), bin_folder.join("bitcoin-cli")), }; Ok(BitcoindConf { bitcoind_path, bitcoincli_path, additional_args, p2p_port: ports[0], rpc_port: ports[1], net, chain, }) } } impl BitcoindInstance { pub fn setup(conf: BitcoindConf) -> Result<Self> { let instance_dir = TempDir::new("bitcoind_test_dir") 
.wrap_err(BitcoindError::TestInstance)? .into_path(); let datadir = instance_dir.join("datadir"); std::fs::create_dir(&datadir).wrap_err(BitcoindError::TestInstance)?; Self::start(instance_dir, datadir, conf) } pub fn start( instance_dir: PathBuf, datadir: impl AsRef<Path>, conf: BitcoindConf, ) -> Result<Self> { let mut datadir_arg = OsString::from_str("-datadir=").unwrap(); datadir_arg.push(datadir.as_ref().as_os_str()); let datadir = datadir.as_ref(); println!("{}", datadir.to_str().unwrap()); let stdout = File::create(instance_dir.join("stdout.txt")).wrap_err(BitcoindError::TestInstance)?; let stderr = File::create(instance_dir.join("stderr.txt")).wrap_err(BitcoindError::TestInstance)?; let rpc_user = "user"; let rpc_pass = "pass"; let bitcoin_conf_str = format!( "\
{net_line} server=1 rpcuser={rpc_user} rpcpassword={rpc_pass} {net_section_header} port={p2p_port} rpcport={rpc_port} ", net_line = net_conf_line(conf.net), net_section_header = net_conf_section_header(conf.net), p2p_port = conf.p2p_port, rpc_port = conf.rpc_port ); let conf_path = match conf.chain { BitcoindChain::XPI => datadir.join("lotus.conf"), _ => datadir.join("bitcoin.conf"), }; { let mut bitcoin_conf = File::create(conf_path).wrap_err(BitcoindError::TestInstance)?; bitcoin_conf .write_all(bitcoin_conf_str.as_bytes()) .wrap_err(BitcoindError::TestInstance)?; bitcoin_conf.flush().wrap_err(BitcoindError::TestInstance)?; } let mut datadir_arg = OsString::from_str("-datadir=").unwrap(); datadir_arg.push(datadir.as_os_str()); let bitcoind_child = Command::new(&conf.bitcoind_path) .arg(&datadir_arg) .args(&conf.additional_args) .stdout(stdout) .stderr(stderr) .spawn() .wrap_err(BitcoindError::TestInstance)?; let cli = BitcoinCli { datadir_arg: datadir_arg.clone(), bitcoincli_path: conf.bitcoincli_path.clone(), }; let client = BitcoindRpcClient::new(BitcoindRpcClientConf { url: format!("http://127.0.0.1:{}", conf.rpc_port), rpc_user: rpc_user.to_string(), rpc_pass: rpc_pass.to_string(), }); 
Ok(BitcoindInstance { conf, instance_dir, datadir_arg, bitcoind_child, cli, client, }) } pub fn cli(&self) -> &BitcoinCli { &self.cli } pub fn rpc_client(&self) -> &BitcoindRpcClient { &self.client } fn shutdown_bitcoind(&mut self) -> Result<()> { self.bitcoind_child .kill() .wrap_err(BitcoindError::TestInstance)?; self.bitcoind_child .wait() .wrap_err(BitcoindError::TestInstance)?; Ok(()) } pub fn restart_with_args(&mut self, args: &[OsString]) -> Result<()> { self.shutdown_bitcoind()?; let stdout = File::create(self.instance_dir.join("stdout1.txt")) .wrap_err(BitcoindError::TestInstance)?; let stderr = File::create(self.instance_dir.join("stderr1.txt")) .wrap_err(BitcoindError::TestInstance)?; let bitcoind_child = Command::new(&self.conf.bitcoind_path) .arg(&self.datadir_arg) .args(args) .stdout(stdout) .stderr(stderr) .spawn() .wrap_err(BitcoindError::TestInstance)?; self.bitcoind_child = bitcoind_child; Ok(()) }
pub fn cmd_output(&self, cmd: &str, args: &[&str]) -> Result<Output> { self.cli.cmd_output(cmd, args) } pub fn cmd_string(&self, cmd: &str, args: &[&str]) -> Result<String> { self.cli.cmd_string(cmd, args) } pub fn cmd_json(&self, cmd: &str, args: &[&str]) -> Result<json::JsonValue> { self.cli.cmd_json(cmd, args) } fn _ensure_bitcoind(&mut self) -> Result<()> { if self .bitcoind_child .try_wait() .wrap_err(BitcoindError::TestInstance)? .is_some() { return Err(BitcoindError::BitcoindExited.into()); } Ok(()) } pub fn p2p_port(&self) -> u16 { self.conf.p2p_port } pub fn wait_for_ready(&mut self) -> Result<()> { for _ in 0..100 { self._ensure_bitcoind()?; std::thread::sleep(Duration::from_millis(50)); let output = self.cmd_output("getblockcount", &[])?; if output.status.success() { return Ok(()); } } Err(BitcoindError::Timeout("bitcoind".into()).into()) } pub fn cleanup(&self) -> Result<()> { std::fs::remove_dir_all(&self.instance_dir).wrap_err(BitcoindError::TestInstance) } } impl Drop for BitcoindInstance { fn drop(&mut self) { if let Ok(None) = self.bitcoind_child.try_wait() { if let Err(err) = self.shutdown_bitcoind() { eprintln!("Failed to shut down bitcoind: {}", err); } } } } fn net_conf_line(net: Net) -> &'static str { match net { Net::Mainnet => "", Net::Regtest => "regtest=1", } } fn net_conf_section_header(net: Net) -> &'static str { match net { Net::Mainnet => "", Net::Regtest => "[regtest]", } }
library.js
//"use strict"; // An implementation of basic necessary libraries for the web. This integrates // with a compiled libc and with the rest of the JS runtime. // // We search the Library object when there is an external function. If the // entry in the Library is a function, we insert it. If it is a string, we // do another lookup in the library (a simple way to write a function once, // if it can be called by different names). We also allow dependencies, // using __deps. Initialization code to be run after allocating all // global constants can be defined by __postset. // // Note that the full function name will be '_' + the name in the Library // object. For convenience, the short name appears here. Note that if you add a // new function with an '_', it will not be found. // Memory allocated during startup, in postsets, should only be ALLOC_STATIC LibraryManager.library = { // keep this low in memory, because we flatten arrays with them in them #if USE_PTHREADS stdin: '; if (ENVIRONMENT_IS_PTHREAD) _stdin = PthreadWorkerInit._stdin; else PthreadWorkerInit._stdin = _stdin = allocate(1, "i32*", ALLOC_STATIC)', stdout: '; if (ENVIRONMENT_IS_PTHREAD) _stdout = PthreadWorkerInit._stdout; else PthreadWorkerInit._stdout = _stdout = allocate(1, "i32*", ALLOC_STATIC)', stderr: '; if (ENVIRONMENT_IS_PTHREAD) _stderr = PthreadWorkerInit._stderr; else PthreadWorkerInit._stderr = _stderr = allocate(1, "i32*", ALLOC_STATIC)', _impure_ptr: '; if (ENVIRONMENT_IS_PTHREAD) __impure_ptr = PthreadWorkerInit.__impure_ptr; else PthreadWorkerInit.__impure_ptr __impure_ptr = allocate(1, "i32*", ALLOC_STATIC)', __dso_handle: '; if (ENVIRONMENT_IS_PTHREAD) ___dso_handle = PthreadWorkerInit.___dso_handle; else PthreadWorkerInit.___dso_handle = ___dso_handle = allocate(1, "i32*", ALLOC_STATIC)', #else stdin: '{{{ makeStaticAlloc(1) }}}', stdout: '{{{ makeStaticAlloc(1) }}}', stderr: '{{{ makeStaticAlloc(1) }}}', _impure_ptr: '{{{ makeStaticAlloc(1) }}}', __dso_handle: '{{{ makeStaticAlloc(1) 
}}}', #endif $PROCINFO: { // permissions /* uid: 0, gid: 0, euid: 0, egid: 0, suid: 0, sgid: 0, fsuid: 0, fsgid: 0, */ // process identification ppid: 1, pid: 42, sid: 42, pgid: 42 }, // ========================================================================== // utime.h // ========================================================================== utime__deps: ['$FS', '__setErrNo', '$ERRNO_CODES'], utime__proxy: 'sync', utime__sig: 'iii', utime: function(path, times) { // int utime(const char *path, const struct utimbuf *times); // http://pubs.opengroup.org/onlinepubs/009695399/basedefs/utime.h.html var time; if (times) { // NOTE: We don't keep track of access timestamps. var offset = {{{ C_STRUCTS.utimbuf.modtime }}}; time = {{{ makeGetValue('times', 'offset', 'i32') }}}; time *= 1000; } else { time = Date.now(); } path = Pointer_stringify(path); try { FS.utime(path, time, time); return 0; } catch (e) { FS.handleFSError(e); return -1; } }, utimes__deps: ['$FS', '__setErrNo', '$ERRNO_CODES'], utimes__proxy: 'sync', utimes__sig: 'iii', utimes: function(path, times) { var time; if (times) { var offset = {{{ C_STRUCTS.timeval.__size__ }}} + {{{ C_STRUCTS.timeval.tv_sec }}}; time = {{{ makeGetValue('times', 'offset', 'i32') }}} * 1000; offset = {{{ C_STRUCTS.timeval.__size__ }}} + {{{ C_STRUCTS.timeval.tv_usec }}}; time += {{{ makeGetValue('times', 'offset', 'i32') }}} / 1000; } else { time = Date.now(); } path = Pointer_stringify(path); try { FS.utime(path, time, time); return 0; } catch (e) { FS.handleFSError(e); return -1; } }, // ========================================================================== // sys/file.h // ========================================================================== flock: function(fd, operation) { // int flock(int fd, int operation); // Pretend to succeed return 0; }, chroot__deps: ['__setErrNo', '$ERRNO_CODES'], chroot__proxy: 'sync', chroot__sig: 'ii', chroot: function(path) { // int chroot(const char *path); // 
http://pubs.opengroup.org/onlinepubs/7908799/xsh/chroot.html ___setErrNo(ERRNO_CODES.EACCES); return -1; }, fpathconf__deps: ['__setErrNo', '$ERRNO_CODES'], fpathconf__proxy: 'sync', fpathconf__sig: 'iii', fpathconf: function(fildes, name) { // long fpathconf(int fildes, int name); // http://pubs.opengroup.org/onlinepubs/000095399/functions/encrypt.html // NOTE: The first parameter is ignored, so pathconf == fpathconf. // The constants here aren't real values. Just mimicking glibc. switch (name) { case {{{ cDefine('_PC_LINK_MAX') }}}: return 32000; case {{{ cDefine('_PC_MAX_CANON') }}}: case {{{ cDefine('_PC_MAX_INPUT') }}}: case {{{ cDefine('_PC_NAME_MAX') }}}: return 255; case {{{ cDefine('_PC_PATH_MAX') }}}: case {{{ cDefine('_PC_PIPE_BUF') }}}: case {{{ cDefine('_PC_REC_MIN_XFER_SIZE') }}}: case {{{ cDefine('_PC_REC_XFER_ALIGN') }}}: case {{{ cDefine('_PC_ALLOC_SIZE_MIN') }}}: return 4096; case {{{ cDefine('_PC_CHOWN_RESTRICTED') }}}: case {{{ cDefine('_PC_NO_TRUNC') }}}: case {{{ cDefine('_PC_2_SYMLINKS') }}}: return 1; case {{{ cDefine('_PC_VDISABLE') }}}: return 0; case {{{ cDefine('_PC_SYNC_IO') }}}: case {{{ cDefine('_PC_ASYNC_IO') }}}: case {{{ cDefine('_PC_PRIO_IO') }}}: case {{{ cDefine('_PC_SOCK_MAXBUF') }}}: case {{{ cDefine('_PC_REC_INCR_XFER_SIZE') }}}: case {{{ cDefine('_PC_REC_MAX_XFER_SIZE') }}}: case {{{ cDefine('_PC_SYMLINK_MAX') }}}: return -1; case {{{ cDefine('_PC_FILESIZEBITS') }}}: return 64; } ___setErrNo(ERRNO_CODES.EINVAL); return -1; }, pathconf: 'fpathconf', confstr__deps: ['__setErrNo', '$ERRNO_CODES', '$ENV'], confstr__proxy: 'sync', confstr__sig: 'iiii', confstr: function(name, buf, len) { // size_t confstr(int name, char *buf, size_t len); // http://pubs.opengroup.org/onlinepubs/000095399/functions/confstr.html var value; switch (name) { case {{{ cDefine('_CS_PATH') }}}: value = ENV['PATH'] || '/'; break; case {{{ cDefine('_CS_POSIX_V6_WIDTH_RESTRICTED_ENVS') }}}: // Mimicking glibc. 
value = 'POSIX_V6_ILP32_OFF32\nPOSIX_V6_ILP32_OFFBIG'; break; case {{{ cDefine('_CS_GNU_LIBC_VERSION') }}}: // This JS implementation was tested against this glibc version. value = 'glibc 2.14'; break; case {{{ cDefine('_CS_GNU_LIBPTHREAD_VERSION') }}}: // We don't support pthreads. value = ''; break; case {{{ cDefine('_CS_POSIX_V6_ILP32_OFF32_LIBS') }}}: case {{{ cDefine('_CS_POSIX_V6_ILP32_OFFBIG_LIBS') }}}: case {{{ cDefine('_CS_POSIX_V6_LP64_OFF64_CFLAGS') }}}: case {{{ cDefine('_CS_POSIX_V6_LP64_OFF64_LDFLAGS') }}}: case {{{ cDefine('_CS_POSIX_V6_LP64_OFF64_LIBS') }}}: case {{{ cDefine('_CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS') }}}: case {{{ cDefine('_CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS') }}}: case {{{ cDefine('_CS_POSIX_V6_LPBIG_OFFBIG_LIBS') }}}: value = ''; break; case {{{ cDefine('_CS_POSIX_V6_ILP32_OFF32_CFLAGS') }}}: case {{{ cDefine('_CS_POSIX_V6_ILP32_OFF32_LDFLAGS') }}}: case {{{ cDefine('_CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS') }}}: value = '-m32'; break; case {{{ cDefine('_CS_POSIX_V6_ILP32_OFFBIG_CFLAGS') }}}: value = '-m32 -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64'; break; default: ___setErrNo(ERRNO_CODES.EINVAL); return 0; } if (len == 0 || buf == 0) { return value.length + 1; } else { var length = Math.min(len, value.length); for (var i = 0; i < length; i++) { {{{ makeSetValue('buf', 'i', 'value.charCodeAt(i)', 'i8') }}}; } if (len > length) {{{ makeSetValue('buf', 'i++', '0', 'i8') }}}; return i; } }, execl__deps: ['__setErrNo', '$ERRNO_CODES'], execl: function(/* ... */) { // int execl(const char *path, const char *arg0, ... /*, (char *)0 */); // http://pubs.opengroup.org/onlinepubs/009695399/functions/exec.html // We don't support executing external code. 
___setErrNo(ERRNO_CODES.ENOEXEC); return -1; }, execle: 'execl', execlp: 'execl', execv: 'execl', execve: 'execl', execvp: 'execl', __execvpe: 'execl', fexecve: 'execl', _exit: function(status) { // void _exit(int status); // http://pubs.opengroup.org/onlinepubs/000095399/functions/exit.html Module['exit'](status); }, fork__deps: ['__setErrNo', '$ERRNO_CODES'], fork: function() { // pid_t fork(void); // http://pubs.opengroup.org/onlinepubs/000095399/functions/fork.html // We don't support multiple processes. ___setErrNo(ERRNO_CODES.EAGAIN); return -1; }, vfork: 'fork', posix_spawn: 'fork', posix_spawnp: 'fork', setgroups__deps: ['__setErrNo', '$ERRNO_CODES', 'sysconf'], setgroups: function(ngroups, gidset) { // int setgroups(int ngroups, const gid_t *gidset); // https://developer.apple.com/library/mac/#documentation/Darwin/Reference/ManPages/man2/setgroups.2.html if (ngroups < 1 || ngroups > _sysconf({{{ cDefine('_SC_NGROUPS_MAX') }}})) { ___setErrNo(ERRNO_CODES.EINVAL); return -1; } else { // We have just one process/user/group, so it makes no sense to set groups. 
___setErrNo(ERRNO_CODES.EPERM); return -1; } }, getpagesize: function() { // int getpagesize(void); return PAGE_SIZE; }, sysconf__deps: ['__setErrNo', '$ERRNO_CODES'], sysconf__proxy: 'sync', sysconf__sig: 'ii', sysconf: function(name) { // long sysconf(int name); // http://pubs.opengroup.org/onlinepubs/009695399/functions/sysconf.html switch(name) { case {{{ cDefine('_SC_PAGE_SIZE') }}}: return PAGE_SIZE; case {{{ cDefine('_SC_PHYS_PAGES') }}}: #if BINARYEN var maxHeapSize = 2*1024*1024*1024 - 65536; #else var maxHeapSize = 2*1024*1024*1024 - 16777216; #endif #if WASM_MEM_MAX != -1 maxHeapSize = {{{ WASM_MEM_MAX }}}; #endif #if !ALLOW_MEMORY_GROWTH maxHeapSize = HEAPU8.length; #endif return maxHeapSize / PAGE_SIZE; case {{{ cDefine('_SC_ADVISORY_INFO') }}}: case {{{ cDefine('_SC_BARRIERS') }}}: case {{{ cDefine('_SC_ASYNCHRONOUS_IO') }}}: case {{{ cDefine('_SC_CLOCK_SELECTION') }}}: case {{{ cDefine('_SC_CPUTIME') }}}: case {{{ cDefine('_SC_FSYNC') }}}: case {{{ cDefine('_SC_IPV6') }}}: case {{{ cDefine('_SC_MAPPED_FILES') }}}: case {{{ cDefine('_SC_MEMLOCK') }}}: case {{{ cDefine('_SC_MEMLOCK_RANGE') }}}: case {{{ cDefine('_SC_MEMORY_PROTECTION') }}}: case {{{ cDefine('_SC_MESSAGE_PASSING') }}}: case {{{ cDefine('_SC_MONOTONIC_CLOCK') }}}: case {{{ cDefine('_SC_PRIORITIZED_IO') }}}: case {{{ cDefine('_SC_PRIORITY_SCHEDULING') }}}: case {{{ cDefine('_SC_RAW_SOCKETS') }}}: case {{{ cDefine('_SC_READER_WRITER_LOCKS') }}}: case {{{ cDefine('_SC_REALTIME_SIGNALS') }}}: case {{{ cDefine('_SC_SEMAPHORES') }}}: case {{{ cDefine('_SC_SHARED_MEMORY_OBJECTS') }}}: case {{{ cDefine('_SC_SPAWN') }}}: case {{{ cDefine('_SC_SPIN_LOCKS') }}}: case {{{ cDefine('_SC_SYNCHRONIZED_IO') }}}: case {{{ cDefine('_SC_THREAD_ATTR_STACKADDR') }}}: case {{{ cDefine('_SC_THREAD_ATTR_STACKSIZE') }}}: case {{{ cDefine('_SC_THREAD_CPUTIME') }}}: case {{{ cDefine('_SC_THREAD_PRIO_INHERIT') }}}: case {{{ cDefine('_SC_THREAD_PRIO_PROTECT') }}}: case {{{ cDefine('_SC_THREAD_PROCESS_SHARED') }}}: 
case {{{ cDefine('_SC_THREAD_SAFE_FUNCTIONS') }}}: case {{{ cDefine('_SC_THREADS') }}}: case {{{ cDefine('_SC_TIMEOUTS') }}}: case {{{ cDefine('_SC_TIMERS') }}}: case {{{ cDefine('_SC_VERSION') }}}: case {{{ cDefine('_SC_2_C_BIND') }}}: case {{{ cDefine('_SC_2_C_DEV') }}}: case {{{ cDefine('_SC_2_CHAR_TERM') }}}: case {{{ cDefine('_SC_2_LOCALEDEF') }}}: case {{{ cDefine('_SC_2_SW_DEV') }}}: case {{{ cDefine('_SC_2_VERSION') }}}: return 200809; case {{{ cDefine('_SC_THREAD_PRIORITY_SCHEDULING') }}}: return 0; case {{{ cDefine('_SC_MQ_OPEN_MAX') }}}: case {{{ cDefine('_SC_XOPEN_STREAMS') }}}: case {{{ cDefine('_SC_XBS5_LP64_OFF64') }}}: case {{{ cDefine('_SC_XBS5_LPBIG_OFFBIG') }}}: case {{{ cDefine('_SC_AIO_LISTIO_MAX') }}}: case {{{ cDefine('_SC_AIO_MAX') }}}: case {{{ cDefine('_SC_SPORADIC_SERVER') }}}: case {{{ cDefine('_SC_THREAD_SPORADIC_SERVER') }}}: case {{{ cDefine('_SC_TRACE') }}}: case {{{ cDefine('_SC_TRACE_EVENT_FILTER') }}}: case {{{ cDefine('_SC_TRACE_EVENT_NAME_MAX') }}}: case {{{ cDefine('_SC_TRACE_INHERIT') }}}: case {{{ cDefine('_SC_TRACE_LOG') }}}: case {{{ cDefine('_SC_TRACE_NAME_MAX') }}}: case {{{ cDefine('_SC_TRACE_SYS_MAX') }}}: case {{{ cDefine('_SC_TRACE_USER_EVENT_MAX') }}}: case {{{ cDefine('_SC_TYPED_MEMORY_OBJECTS') }}}: case {{{ cDefine('_SC_V6_LP64_OFF64') }}}: case {{{ cDefine('_SC_V6_LPBIG_OFFBIG') }}}: case {{{ cDefine('_SC_2_FORT_DEV') }}}: case {{{ cDefine('_SC_2_FORT_RUN') }}}: case {{{ cDefine('_SC_2_PBS') }}}: case {{{ cDefine('_SC_2_PBS_ACCOUNTING') }}}: case {{{ cDefine('_SC_2_PBS_CHECKPOINT') }}}: case {{{ cDefine('_SC_2_PBS_LOCATE') }}}: case {{{ cDefine('_SC_2_PBS_MESSAGE') }}}: case {{{ cDefine('_SC_2_PBS_TRACK') }}}: case {{{ cDefine('_SC_2_UPE') }}}: case {{{ cDefine('_SC_THREAD_THREADS_MAX') }}}: case {{{ cDefine('_SC_SEM_NSEMS_MAX') }}}: case {{{ cDefine('_SC_SYMLOOP_MAX') }}}: case {{{ cDefine('_SC_TIMER_MAX') }}}: return -1; case {{{ cDefine('_SC_V6_ILP32_OFF32') }}}: case {{{ cDefine('_SC_V6_ILP32_OFFBIG') }}}: 
case {{{ cDefine('_SC_JOB_CONTROL') }}}: case {{{ cDefine('_SC_REGEXP') }}}: case {{{ cDefine('_SC_SAVED_IDS') }}}: case {{{ cDefine('_SC_SHELL') }}}: case {{{ cDefine('_SC_XBS5_ILP32_OFF32') }}}: case {{{ cDefine('_SC_XBS5_ILP32_OFFBIG') }}}: case {{{ cDefine('_SC_XOPEN_CRYPT') }}}: case {{{ cDefine('_SC_XOPEN_ENH_I18N') }}}: case {{{ cDefine('_SC_XOPEN_LEGACY') }}}: case {{{ cDefine('_SC_XOPEN_REALTIME') }}}: case {{{ cDefine('_SC_XOPEN_REALTIME_THREADS') }}}: case {{{ cDefine('_SC_XOPEN_SHM') }}}: case {{{ cDefine('_SC_XOPEN_UNIX') }}}: return 1; case {{{ cDefine('_SC_THREAD_KEYS_MAX') }}}: case {{{ cDefine('_SC_IOV_MAX') }}}: case {{{ cDefine('_SC_GETGR_R_SIZE_MAX') }}}: case {{{ cDefine('_SC_GETPW_R_SIZE_MAX') }}}: case {{{ cDefine('_SC_OPEN_MAX') }}}: return 1024; case {{{ cDefine('_SC_RTSIG_MAX') }}}: case {{{ cDefine('_SC_EXPR_NEST_MAX') }}}: case {{{ cDefine('_SC_TTY_NAME_MAX') }}}: return 32; case {{{ cDefine('_SC_ATEXIT_MAX') }}}: case {{{ cDefine('_SC_DELAYTIMER_MAX') }}}: case {{{ cDefine('_SC_SEM_VALUE_MAX') }}}: return 2147483647; case {{{ cDefine('_SC_SIGQUEUE_MAX') }}}: case {{{ cDefine('_SC_CHILD_MAX') }}}: return 47839; case {{{ cDefine('_SC_BC_SCALE_MAX') }}}: case {{{ cDefine('_SC_BC_BASE_MAX') }}}: return 99; case {{{ cDefine('_SC_LINE_MAX') }}}: case {{{ cDefine('_SC_BC_DIM_MAX') }}}: return 2048; case {{{ cDefine('_SC_ARG_MAX') }}}: return 2097152; case {{{ cDefine('_SC_NGROUPS_MAX') }}}: return 65536; case {{{ cDefine('_SC_MQ_PRIO_MAX') }}}: return 32768; case {{{ cDefine('_SC_RE_DUP_MAX') }}}: return 32767; case {{{ cDefine('_SC_THREAD_STACK_MIN') }}}: return 16384; case {{{ cDefine('_SC_BC_STRING_MAX') }}}: return 1000; case {{{ cDefine('_SC_XOPEN_VERSION') }}}: return 700; case {{{ cDefine('_SC_LOGIN_NAME_MAX') }}}: return 256; case {{{ cDefine('_SC_COLL_WEIGHTS_MAX') }}}: return 255; case {{{ cDefine('_SC_CLK_TCK') }}}: return 100; case {{{ cDefine('_SC_HOST_NAME_MAX') }}}: return 64; case {{{ cDefine('_SC_AIO_PRIO_DELTA_MAX') }}}: 
return 20; case {{{ cDefine('_SC_STREAM_MAX') }}}: return 16; case {{{ cDefine('_SC_TZNAME_MAX') }}}: return 6; case {{{ cDefine('_SC_THREAD_DESTRUCTOR_ITERATIONS') }}}: return 4; case {{{ cDefine('_SC_NPROCESSORS_ONLN') }}}: { if (typeof navigator === 'object') return navigator['hardwareConcurrency'] || 1; return 1; } } ___setErrNo(ERRNO_CODES.EINVAL); return -1; }, // Implement a Linux-like 'memory area' for our 'process'. // Changes the size of the memory area by |bytes|; returns the // address of the previous top ('break') of the memory area // We control the "dynamic" memory - DYNAMIC_BASE to DYNAMICTOP sbrk__asm: true, sbrk__sig: ['ii'], sbrk__deps: ['__setErrNo'], sbrk: function(increment) { increment = increment|0; var oldDynamicTop = 0; var oldDynamicTopOnChange = 0; var newDynamicTop = 0; var totalMemory = 0; increment = ((increment + 15) & -16)|0; #if USE_PTHREADS totalMemory = getTotalMemory()|0; // Perform a compare-and-swap loop to update the new dynamic top value. This is because // this function can becalled simultaneously in multiple threads. do { oldDynamicTop = Atomics_load(HEAP32, DYNAMICTOP_PTR>>2)|0; newDynamicTop = oldDynamicTop + increment | 0; // Asking to increase dynamic top to a too high value? In pthreads builds we cannot // enlarge memory, so this needs to fail. if (((increment|0) > 0 & (newDynamicTop|0) < (oldDynamicTop|0)) // Detect and fail if we would wrap around signed 32-bit int. | (newDynamicTop|0) < 0 // Also underflow, sbrk() should be able to be used to subtract. | (newDynamicTop|0) > (totalMemory|0)) { #if ABORTING_MALLOC abortOnCannotGrowMemory()|0; #else ___setErrNo({{{ cDefine('ENOMEM') }}}); return -1; #endif } // Attempt to update the dynamic top to new value. Another thread may have beat this thread to the update, // in which case we will need to start over by iterating the loop body again. 
oldDynamicTopOnChange = Atomics_compareExchange(HEAP32, DYNAMICTOP_PTR>>2, oldDynamicTop|0, newDynamicTop|0)|0; } while((oldDynamicTopOnChange|0) != (oldDynamicTop|0)); #else // singlethreaded build: (-s USE_PTHREADS=0) oldDynamicTop = HEAP32[DYNAMICTOP_PTR>>2]|0; newDynamicTop = oldDynamicTop + increment | 0; if (((increment|0) > 0 & (newDynamicTop|0) < (oldDynamicTop|0)) // Detect and fail if we would wrap around signed 32-bit int. | (newDynamicTop|0) < 0) { // Also underflow, sbrk() should be able to be used to subtract. #if ABORTING_MALLOC abortOnCannotGrowMemory()|0; #endif ___setErrNo({{{ cDefine('ENOMEM') }}}); return -1; } HEAP32[DYNAMICTOP_PTR>>2] = newDynamicTop; totalMemory = getTotalMemory()|0; if ((newDynamicTop|0) > (totalMemory|0)) { if ((enlargeMemory()|0) == 0) { HEAP32[DYNAMICTOP_PTR>>2] = oldDynamicTop; ___setErrNo({{{ cDefine('ENOMEM') }}}); return -1; } } #endif return oldDynamicTop|0; }, brk__asm: true, brk__sig: ['ii'], brk: function(newDynamicTop) { newDynamicTop = newDynamicTop|0; var oldDynamicTop = 0; var totalMemory = 0; #if USE_PTHREADS totalMemory = getTotalMemory()|0; // Asking to increase dynamic top to a too high value? In pthreads builds we cannot // enlarge memory, so this needs to fail. 
if ((newDynamicTop|0) < 0 | (newDynamicTop|0) > (totalMemory|0)) { #if ABORTING_MALLOC abortOnCannotGrowMemory()|0; #else ___setErrNo({{{ cDefine('ENOMEM') }}}); return -1; #endif } Atomics_store(HEAP32, DYNAMICTOP_PTR>>2, newDynamicTop|0)|0; #else // singlethreaded build: (-s USE_PTHREADS=0) if ((newDynamicTop|0) < 0) { #if ABORTING_MALLOC abortOnCannotGrowMemory()|0; #endif ___setErrNo({{{ cDefine('ENOMEM') }}}); return -1; } oldDynamicTop = HEAP32[DYNAMICTOP_PTR>>2]|0; HEAP32[DYNAMICTOP_PTR>>2] = newDynamicTop; totalMemory = getTotalMemory()|0; if ((newDynamicTop|0) > (totalMemory|0)) { if ((enlargeMemory()|0) == 0) { ___setErrNo({{{ cDefine('ENOMEM') }}}); HEAP32[DYNAMICTOP_PTR>>2] = oldDynamicTop; return -1; } } #endif return 0; }, system__deps: ['__setErrNo', '$ERRNO_CODES'], system: function(command) { // int system(const char *command); // http://pubs.opengroup.org/onlinepubs/000095399/functions/system.html // Can't call external programs. ___setErrNo(ERRNO_CODES.EAGAIN); return -1; }, // ========================================================================== // stdlib.h // ========================================================================== // tiny, fake malloc/free implementation. If the program actually uses malloc, // a compiled version will be used; this will only be used if the runtime // needs to allocate something, for which this is good enough if otherwise // no malloc is needed. malloc: function(bytes) { /* Over-allocate to make sure it is byte-aligned by 8. * This will leak memory, but this is only the dummy * implementation (replaced by dlmalloc normally) so * not an issue. 
*/ #if ASSERTIONS == 2 warnOnce('using stub malloc (reference it from C to have the real one included)'); #endif var ptr = dynamicAlloc(bytes + 8); return (ptr+8) & 0xFFFFFFF8; }, free: function() { #if ASSERTIONS == 2 warnOnce('using stub free (reference it from C to have the real one included)'); #endif }, abs: 'Math_abs', labs: 'Math_abs', exit__deps: ['_exit'], exit: function(status) { __exit(status); }, _Exit__deps: ['exit'], _Exit: function(status) { __exit(status); }, _ZSt9terminatev__deps: ['exit'], _ZSt9terminatev: function() { _exit(-1234); }, atexit__proxy: 'sync', atexit__sig: 'ii', atexit: function(func, arg) { #if ASSERTIONS #if NO_EXIT_RUNTIME == 1 warnOnce('atexit() called, but NO_EXIT_RUNTIME is set, so atexits() will not be called. set NO_EXIT_RUNTIME to 0 (see the FAQ)'); #endif #endif __ATEXIT__.unshift({ func: func, arg: arg }); }, __cxa_atexit: 'atexit', // used in rust, clang when doing thread_local statics __cxa_thread_atexit: 'atexit', __cxa_thread_atexit_impl: 'atexit', abort: function() { Module['abort'](); }, environ__deps: ['$ENV'], #if USE_PTHREADS environ: '; if (ENVIRONMENT_IS_PTHREAD) _environ = PthreadWorkerInit._environ; else PthreadWorkerInit._environ = _environ = allocate(1, "i32*", ALLOC_STATIC)', #else environ: '{{{ makeStaticAlloc(1) }}}', #endif __environ__deps: ['environ'], __environ: 'environ', __buildEnvironment__deps: ['__environ'], __buildEnvironment: function(env) { // WARNING: Arbitrary limit! var MAX_ENV_VALUES = 64; var TOTAL_ENV_SIZE = 1024; // Statically allocate memory for the environment. var poolPtr; var envPtr; if (!___buildEnvironment.called) { ___buildEnvironment.called = true; // Set default values. Use string keys for Closure Compiler compatibility. ENV['USER'] = ENV['LOGNAME'] = 'web_user'; ENV['PATH'] = '/'; ENV['PWD'] = '/'; ENV['HOME'] = '/home/web_user'; ENV['LANG'] = 'C.UTF-8'; ENV['_'] = Module['thisProgram']; // Allocate memory. 
poolPtr = staticAlloc(TOTAL_ENV_SIZE); envPtr = staticAlloc(MAX_ENV_VALUES * {{{ Runtime.POINTER_SIZE }}}); {{{ makeSetValue('envPtr', '0', 'poolPtr', 'i8*') }}}; {{{ makeSetValue(makeGlobalUse('_environ'), 0, 'envPtr', 'i8*') }}}; } else { envPtr = {{{ makeGetValue(makeGlobalUse('_environ'), '0', 'i8**') }}}; poolPtr = {{{ makeGetValue('envPtr', '0', 'i8*') }}}; } // Collect key=value lines. var strings = []; var totalSize = 0; for (var key in env) { if (typeof env[key] === 'string') { var line = key + '=' + env[key]; strings.push(line); totalSize += line.length; } } if (totalSize > TOTAL_ENV_SIZE) { throw new Error('Environment size exceeded TOTAL_ENV_SIZE!'); } // Make new. var ptrSize = {{{ Runtime.getNativeTypeSize('i8*') }}}; for (var i = 0; i < strings.length; i++) { var line = strings[i]; writeAsciiToMemory(line, poolPtr); {{{ makeSetValue('envPtr', 'i * ptrSize', 'poolPtr', 'i8*') }}}; poolPtr += line.length + 1; } {{{ makeSetValue('envPtr', 'strings.length * ptrSize', '0', 'i8*') }}}; }, $ENV__deps: ['__buildEnvironment'], #if USE_PTHREADS $ENV__postset: 'if (!ENVIRONMENT_IS_PTHREAD) ___buildEnvironment(ENV);', #else $ENV__postset: '___buildEnvironment(ENV);', #endif $ENV: {}, getenv__deps: ['$ENV'], getenv__proxy: 'sync', getenv__sig: 'ii', getenv: function(name) { // char *getenv(const char *name); // http://pubs.opengroup.org/onlinepubs/009695399/functions/getenv.html if (name === 0) return 0; name = Pointer_stringify(name); if (!ENV.hasOwnProperty(name)) return 0; if (_getenv.ret) _free(_getenv.ret); _getenv.ret = allocateUTF8(ENV[name]); return _getenv.ret; }, clearenv__deps: ['$ENV', '__buildEnvironment'], clearenv__proxy: 'sync', clearenv__sig: 'i', clearenv: function() { // int clearenv (void); // http://www.gnu.org/s/hello/manual/libc/Environment-Access.html#index-clearenv-3107 ENV = {}; ___buildEnvironment(ENV); return 0; }, setenv__deps: ['$ENV', '__buildEnvironment', '$ERRNO_CODES', '__setErrNo'], setenv__proxy: 'sync', setenv__sig: 'iiii', 
setenv: function(envname, envval, overwrite) { // int setenv(const char *envname, const char *envval, int overwrite); // http://pubs.opengroup.org/onlinepubs/009695399/functions/setenv.html if (envname === 0) { ___setErrNo(ERRNO_CODES.EINVAL); return -1; } var name = Pointer_stringify(envname); var val = Pointer_stringify(envval); if (name === '' || name.indexOf('=') !== -1) { ___setErrNo(ERRNO_CODES.EINVAL); return -1; } if (ENV.hasOwnProperty(name) && !overwrite) return 0; ENV[name] = val; ___buildEnvironment(ENV); return 0; }, unsetenv__deps: ['$ENV', '__buildEnvironment', '$ERRNO_CODES', '__setErrNo'], unsetenv__proxy: 'sync', unsetenv__sig: 'ii', unsetenv: function(name) { // int unsetenv(const char *name); // http://pubs.opengroup.org/onlinepubs/009695399/functions/unsetenv.html if (name === 0) { ___setErrNo(ERRNO_CODES.EINVAL); return -1; } name = Pointer_stringify(name); if (name === '' || name.indexOf('=') !== -1) { ___setErrNo(ERRNO_CODES.EINVAL); return -1; } if (ENV.hasOwnProperty(name)) { delete ENV[name]; ___buildEnvironment(ENV); } return 0; }, putenv__deps: ['$ENV', '__buildEnvironment', '$ERRNO_CODES', '__setErrNo'], putenv__proxy: 'sync', putenv__sig: 'ii', putenv: function(string) { // int putenv(char *string); // http://pubs.opengroup.org/onlinepubs/009695399/functions/putenv.html // WARNING: According to the standard (and the glibc implementation), the // string is taken by reference so future changes are reflected. // We copy it instead, possibly breaking some uses. 
if (string === 0) { ___setErrNo(ERRNO_CODES.EINVAL); return -1; } string = Pointer_stringify(string); var splitPoint = string.indexOf('=') if (string === '' || string.indexOf('=') === -1) { ___setErrNo(ERRNO_CODES.EINVAL); return -1; } var name = string.slice(0, splitPoint); var value = string.slice(splitPoint + 1); if (!(name in ENV) || ENV[name] !== value) { ENV[name] = value; ___buildEnvironment(ENV); } return 0; }, getloadavg: function(loadavg, nelem) { // int getloadavg(double loadavg[], int nelem); // http://linux.die.net/man/3/getloadavg var limit = Math.min(nelem, 3); var doubleSize = {{{ Runtime.getNativeTypeSize('double') }}}; for (var i = 0; i < limit; i++) { {{{ makeSetValue('loadavg', 'i * doubleSize', '0.1', 'double') }}}; } return limit; }, // For compatibility, call to rand() when code requests arc4random(), although this is *not* at all // as strong as rc4 is. See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man3/arc4random.3.html arc4random: 'rand', // ========================================================================== // string.h // ========================================================================== memcpy__inline: function(dest, src, num, align) { var ret = ''; ret += makeCopyValues(dest, src, num, 'null', null, align); return ret; }, emscripten_memcpy_big: function(dest, src, num) { HEAPU8.set(HEAPU8.subarray(src, src+num), dest); return dest; }, memcpy__asm: true, memcpy__sig: 'iiii', memcpy__deps: ['emscripten_memcpy_big'], memcpy: function(dest, src, num) { dest = dest|0; src = src|0; num = num|0; var ret = 0; var aligned_dest_end = 0; var block_aligned_dest_end = 0; var dest_end = 0; // Test against a benchmarked cutoff limit for when HEAPU8.set() becomes faster to use. if ((num|0) >= #if SIMD 196608 #else 8192 #endif ) { return _emscripten_memcpy_big(dest|0, src|0, num|0)|0; } ret = dest|0; dest_end = (dest + num)|0; if ((dest&3) == (src&3)) { // The initial unaligned < 4-byte front. 
while (dest & 3) { if ((num|0) == 0) return ret|0; {{{ makeSetValueAsm('dest', 0, makeGetValueAsm('src', 0, 'i8'), 'i8') }}}; dest = (dest+1)|0; src = (src+1)|0; num = (num-1)|0; } aligned_dest_end = (dest_end & -4)|0; block_aligned_dest_end = (aligned_dest_end - 64)|0; while ((dest|0) <= (block_aligned_dest_end|0) ) { #if SIMD SIMD_Int32x4_store(HEAPU8, dest, SIMD_Int32x4_load(HEAPU8, src)); SIMD_Int32x4_store(HEAPU8, dest+16, SIMD_Int32x4_load(HEAPU8, src+16)); SIMD_Int32x4_store(HEAPU8, dest+32, SIMD_Int32x4_load(HEAPU8, src+32)); SIMD_Int32x4_store(HEAPU8, dest+48, SIMD_Int32x4_load(HEAPU8, src+48)); #else {{{ makeSetValueAsm('dest', 0, makeGetValueAsm('src', 0, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 4, makeGetValueAsm('src', 4, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 8, makeGetValueAsm('src', 8, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 12, makeGetValueAsm('src', 12, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 16, makeGetValueAsm('src', 16, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 20, makeGetValueAsm('src', 20, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 24, makeGetValueAsm('src', 24, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 28, makeGetValueAsm('src', 28, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 32, makeGetValueAsm('src', 32, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 36, makeGetValueAsm('src', 36, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 40, makeGetValueAsm('src', 40, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 44, makeGetValueAsm('src', 44, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 48, makeGetValueAsm('src', 48, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 52, makeGetValueAsm('src', 52, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 56, makeGetValueAsm('src', 56, 'i32'), 'i32') }}}; {{{ makeSetValueAsm('dest', 60, makeGetValueAsm('src', 60, 'i32'), 'i32') }}}; #endif dest = (dest+64)|0; src = (src+64)|0; } while ((dest|0) < (aligned_dest_end|0) ) { {{{ makeSetValueAsm('dest', 0, 
makeGetValueAsm('src', 0, 'i32'), 'i32') }}}; dest = (dest+4)|0; src = (src+4)|0; } } else { // In the unaligned copy case, unroll a bit as well. aligned_dest_end = (dest_end - 4)|0; while ((dest|0) < (aligned_dest_end|0) ) { {{{ makeSetValueAsm('dest', 0, makeGetValueAsm('src', 0, 'i8'), 'i8') }}}; {{{ makeSetValueAsm('dest', 1, makeGetValueAsm('src', 1, 'i8'), 'i8') }}}; {{{ makeSetValueAsm('dest', 2, makeGetValueAsm('src', 2, 'i8'), 'i8') }}}; {{{ makeSetValueAsm('dest', 3, makeGetValueAsm('src', 3, 'i8'), 'i8') }}}; dest = (dest+4)|0; src = (src+4)|0; } } // The remaining unaligned < 4 byte tail. while ((dest|0) < (dest_end|0)) { {{{ makeSetValueAsm('dest', 0, makeGetValueAsm('src', 0, 'i8'), 'i8') }}}; dest = (dest+1)|0; src = (src+1)|0; } return ret|0; }, llvm_memcpy_i32: 'memcpy', llvm_memcpy_i64: 'memcpy', llvm_memcpy_p0i8_p0i8_i32: 'memcpy', llvm_memcpy_p0i8_p0i8_i64: 'memcpy', memmove__sig: 'iiii', memmove__asm: true, memmove__deps: ['memcpy'], memmove: function(dest, src, num) { dest = dest|0; src = src|0; num = num|0; var ret = 0; if (((src|0) < (dest|0)) & ((dest|0) < ((src + num)|0))) { // Unlikely case: Copy backwards in a safe manner ret = dest; src = (src + num)|0; dest = (dest + num)|0; while ((num|0) > 0) { dest = (dest - 1)|0; src = (src - 1)|0; num = (num - 1)|0; {{{ makeSetValueAsm('dest', 0, makeGetValueAsm('src', 0, 'i8'), 'i8') }}}; } dest = ret; } else { _memcpy(dest, src, num) | 0; } return dest | 0; }, llvm_memmove_i32: 'memmove', llvm_memmove_i64: 'memmove', llvm_memmove_p0i8_p0i8_i32: 'memmove', llvm_memmove_p0i8_p0i8_i64: 'memmove', memset__inline: function(ptr, value, num, align) { return makeSetValues(ptr, 0, value, 'null', num, align); }, memset__sig: 'iiii', memset__asm: true, memset: function(ptr, value, num) { ptr = ptr|0; value = value|0; num = num|0; var end = 0, aligned_end = 0, block_aligned_end = 0, value4 = 0; #if SIMD var value16 = SIMD_Int32x4(0,0,0,0); #endif end = (ptr + num)|0; value = value & 0xff; if ((num|0) >= 67 
/* 64 bytes for an unrolled loop + 3 bytes for unaligned head*/) { while ((ptr&3) != 0) { {{{ makeSetValueAsm('ptr', 0, 'value', 'i8') }}}; ptr = (ptr+1)|0; } aligned_end = (end & -4)|0; block_aligned_end = (aligned_end - 64)|0; value4 = value | (value << 8) | (value << 16) | (value << 24); #if SIMD value16 = SIMD_Int32x4_splat(value4); #endif while((ptr|0) <= (block_aligned_end|0)) { #if SIMD SIMD_Int32x4_store(HEAPU8, ptr, value16); SIMD_Int32x4_store(HEAPU8, ptr+16, value16); SIMD_Int32x4_store(HEAPU8, ptr+32, value16); SIMD_Int32x4_store(HEAPU8, ptr+48, value16); #else {{{ makeSetValueAsm('ptr', 0, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 4, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 8, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 12, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 16, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 20, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 24, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 28, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 32, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 36, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 40, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 44, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 48, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 52, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 56, 'value4', 'i32') }}}; {{{ makeSetValueAsm('ptr', 60, 'value4', 'i32') }}}; #endif ptr = (ptr + 64)|0; } while ((ptr|0) < (aligned_end|0) ) { {{{ makeSetValueAsm('ptr', 0, 'value4', 'i32') }}}; ptr = (ptr+4)|0; } } // The remaining bytes. 
while ((ptr|0) < (end|0)) { {{{ makeSetValueAsm('ptr', 0, 'value', 'i8') }}}; ptr = (ptr+1)|0; } return (end-num)|0; }, llvm_memset_i32: 'memset', llvm_memset_p0i8_i32: 'memset', llvm_memset_p0i8_i64: 'memset', // ========================================================================== // GCC/LLVM specifics // ========================================================================== __builtin_prefetch: function(){}, // ========================================================================== // LLVM specifics // ========================================================================== llvm_va_start__inline: function(ptr) { // varargs - we received a pointer to the varargs as a final 'extra' parameter called 'varrp' // 2-word structure: struct { void* start; void* currentOffset; } return makeSetValue(ptr, 0, 'varrp', 'void*') + ';' + makeSetValue(ptr, Runtime.QUANTUM_SIZE, 0, 'void*'); }, llvm_va_end: function() {}, llvm_va_copy: function(ppdest, ppsrc) { // copy the list start {{{ makeCopyValues('ppdest', 'ppsrc', Runtime.QUANTUM_SIZE, 'null', null, 1) }}}; // copy the list's current offset (will be advanced with each call to va_arg) {{{ makeCopyValues('(ppdest+'+Runtime.QUANTUM_SIZE+')', '(ppsrc+'+Runtime.QUANTUM_SIZE+')', Runtime.QUANTUM_SIZE, 'null', null, 1) }}}; }, llvm_bswap_i16__asm: true, llvm_bswap_i16__sig: 'ii', llvm_bswap_i16: function(x) { x = x|0; return (((x&0xff)<<8) | ((x>>8)&0xff))|0; }, llvm_bswap_i32__asm: true, llvm_bswap_i32__sig: 'ii', llvm_bswap_i32: function(x) { x = x|0; return (((x&0xff)<<24) | (((x>>8)&0xff)<<16) | (((x>>16)&0xff)<<8) | (x>>>24))|0; }, llvm_bswap_i64__deps: ['llvm_bswap_i32'], llvm_bswap_i64: function(l, h) { var retl = _llvm_bswap_i32(h)>>>0; var reth = _llvm_bswap_i32(l)>>>0; {{{ makeStructuralReturn(['retl', 'reth']) }}}; }, llvm_ctlz_i8__asm: true, llvm_ctlz_i8__sig: 'ii', llvm_ctlz_i8: function(x, isZeroUndef) { x = x | 0; isZeroUndef = isZeroUndef | 0; return (Math_clz32(x & 0xff) | 0) - 24 | 0; }, 
llvm_ctlz_i16__asm: true, llvm_ctlz_i16__sig: 'ii', llvm_ctlz_i16: function(x, isZeroUndef) { x = x | 0; isZeroUndef = isZeroUndef | 0; return (Math_clz32(x & 0xffff) | 0) - 16 | 0 }, llvm_ctlz_i64__asm: true, llvm_ctlz_i64__sig: 'iii', llvm_ctlz_i64: function(l, h, isZeroUndef) { l = l | 0; h = h | 0; isZeroUndef = isZeroUndef | 0; var ret = 0; ret = Math_clz32(h) | 0; if ((ret | 0) == 32) ret = ret + (Math_clz32(l) | 0) | 0; {{{ makeSetTempRet0('0') }}}; return ret | 0; }, llvm_cttz_i32__deps: [function() { function cttz(x) { for (var i = 0; i < 8; i++) { if (x & (1 << i)) { return i; } } return 8; } if (SIDE_MODULE) return ''; // uses it from the parent #if USE_PTHREADS return 'var cttz_i8; if (ENVIRONMENT_IS_PTHREAD) cttz_i8 = PthreadWorkerInit.cttz_i8; else PthreadWorkerInit.cttz_i8 = cttz_i8 = allocate([' + range(256).map(function(x) { return cttz(x) }).join(',') + '], "i8", ALLOC_STATIC);'; #else return 'var cttz_i8 = allocate([' + range(256).map(function(x) { return cttz(x) }).join(',') + '], "i8", ALLOC_STATIC);'; #endif }], #if BINARYEN == 0 // binaryen will convert these calls to wasm anyhow llvm_cttz_i32__asm: true, #endif llvm_cttz_i32__sig: 'ii', llvm_cttz_i32: function(x) { x = x|0; var ret = 0; ret = {{{ makeGetValueAsm('cttz_i8', 'x & 0xff', 'i8') }}}; if ((ret|0) < 8) return ret|0; ret = {{{ makeGetValueAsm('cttz_i8', '(x >> 8)&0xff', 'i8') }}}; if ((ret|0) < 8) return (ret + 8)|0; ret = {{{ makeGetValueAsm('cttz_i8', '(x >> 16)&0xff', 'i8') }}}; if ((ret|0) < 8) return (ret + 16)|0; return ({{{ makeGetValueAsm('cttz_i8', 'x >>> 24', 'i8') }}} + 24)|0; }, llvm_cttz_i64__deps: ['llvm_cttz_i32'], llvm_cttz_i64: function(l, h) { var ret = _llvm_cttz_i32(l); if (ret == 32) ret += _llvm_cttz_i32(h); {{{ makeStructuralReturn(['ret', '0']) }}}; }, llvm_ctpop_i32__asm: true, llvm_ctpop_i32__sig: 'ii', llvm_ctpop_i32: function(x) { // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel // http://bits.stephan-brumme.com/countBits.html x 
= x | 0; x = x - ((x >>> 1) & 0x55555555) | 0; x = (x & 0x33333333) + ((x >>> 2) & 0x33333333) | 0; return (Math_imul((x + (x >>> 4) & 252645135 /* 0xF0F0F0F, but hits uglify parse bug? */), 0x1010101) >>> 24) | 0; }, llvm_ctpop_i64__deps: ['llvm_ctpop_i32'], llvm_ctpop_i64__asm: true, llvm_ctpop_i64__sig: 'iii', llvm_ctpop_i64: function(l, h) { l = l | 0; h = h | 0; return (_llvm_ctpop_i32(l) | 0) + (_llvm_ctpop_i32(h) | 0) | 0; }, llvm_trap: function() { abort('trap!'); }, llvm_prefetch: function(){}, __assert_fail: function(condition, filename, line, func) { abort('Assertion failed: ' + Pointer_stringify(condition) + ', at: ' + [filename ? Pointer_stringify(filename) : 'unknown filename', line, func ? Pointer_stringify(func) : 'unknown function']); }, __assert_func: function(filename, line, func, condition) { abort('Assertion failed: ' + (condition ? Pointer_stringify(condition) : 'unknown condition') + ', at: ' + [filename ? Pointer_stringify(filename) : 'unknown filename', line, func ? 
Pointer_stringify(func) : 'unknown function']); }, $EXCEPTIONS: { last: 0, caught: [], infos: {}, deAdjust: function(adjusted) { if (!adjusted || EXCEPTIONS.infos[adjusted]) return adjusted; for (var ptr in EXCEPTIONS.infos) { var info = EXCEPTIONS.infos[ptr]; if (info.adjusted === adjusted) { #if EXCEPTION_DEBUG Module.printErr('de-adjusted exception ptr ' + adjusted + ' to ' + ptr); #endif return ptr; } } #if EXCEPTION_DEBUG Module.printErr('no de-adjustment for unknown exception ptr ' + adjusted); #endif return adjusted; }, addRef: function(ptr) { #if EXCEPTION_DEBUG Module.printErr('addref ' + ptr); #endif if (!ptr) return; var info = EXCEPTIONS.infos[ptr]; info.refcount++; }, decRef: function(ptr) { #if EXCEPTION_DEBUG Module.printErr('decref ' + ptr); #endif if (!ptr) return; var info = EXCEPTIONS.infos[ptr]; assert(info.refcount > 0); info.refcount--; // A rethrown exception can reach refcount 0; it must not be discarded // Its next handler will clear the rethrown flag and addRef it, prior to // final decRef and destruction here if (info.refcount === 0 && !info.rethrown) { if (info.destructor) { #if WASM_BACKEND == 0 Module['dynCall_vi'](info.destructor, ptr); #else // In Wasm, destructors return 'this' as in ARM Module['dynCall_ii'](info.destructor, ptr); #endif } delete EXCEPTIONS.infos[ptr]; ___cxa_free_exception(ptr); #if EXCEPTION_DEBUG Module.printErr('decref freeing exception ' + [ptr, EXCEPTIONS.last, 'stack', EXCEPTIONS.caught]); #endif } }, clearRef: function(ptr) { if (!ptr) return; var info = EXCEPTIONS.infos[ptr]; info.refcount = 0; }, }, // Exceptions __cxa_allocate_exception__deps: ['malloc'], __cxa_allocate_exception: function(size) { return _malloc(size); }, __cxa_free_exception__deps: ['free'], __cxa_free_exception: function(ptr) { try { return _free(ptr); } catch(e) { // XXX FIXME #if ASSERTIONS Module.printErr('exception during cxa_free_exception: ' + e); #endif } }, __cxa_increment_exception_refcount__deps: ['$EXCEPTIONS'], 
__cxa_increment_exception_refcount: function(ptr) {
  // Bump the refcount of a live exception object (after de-adjusting a
  // possibly pointer-adjusted ptr back to the original allocation).
  EXCEPTIONS.addRef(EXCEPTIONS.deAdjust(ptr));
},
__cxa_decrement_exception_refcount__deps: ['$EXCEPTIONS'],
__cxa_decrement_exception_refcount: function(ptr) {
  // Drop a reference; EXCEPTIONS.decRef runs the destructor and frees the
  // exception storage once the count reaches zero (unless rethrown).
  EXCEPTIONS.decRef(EXCEPTIONS.deAdjust(ptr));
},
// Here, we throw an exception after recording a couple of values that we need to remember
// We also remember that it was the last exception thrown as we need to know that later.
__cxa_throw__sig: 'viii',
__cxa_throw__deps: ['_ZSt18uncaught_exceptionv', '__cxa_find_matching_catch', '$EXCEPTIONS'],
// Itanium C++ ABI entry point for `throw`:
//   ptr        - pointer to the exception object (from __cxa_allocate_exception)
//   type       - pointer to the std::type_info of the thrown type
//   destructor - function pointer used to destroy the object when released
// Records bookkeeping in EXCEPTIONS.infos, marks this as the most recent
// exception, bumps the uncaught-exception counter, then performs the actual
// JS-level throw via the makeThrow macro.
__cxa_throw: function(ptr, type, destructor) {
#if EXCEPTION_DEBUG
  Module.printErr('Compiled code throwing an exception, ' + [ptr,type,destructor]);
#endif
  // Fresh info record: refcount starts at 0 (begin_catch addRef's it),
  // and `adjusted` starts equal to ptr until a matching catch adjusts it.
  EXCEPTIONS.infos[ptr] = {
    ptr: ptr,
    adjusted: ptr,
    type: type,
    destructor: destructor,
    refcount: 0,
    caught: false,
    rethrown: false
  };
  EXCEPTIONS.last = ptr;
  // Counter backing std::uncaught_exception(); stored as a property on the
  // function object itself, created lazily on first throw.
  if (!("uncaught_exception" in __ZSt18uncaught_exceptionv)) {
    __ZSt18uncaught_exceptionv.uncaught_exception = 1;
  } else {
    __ZSt18uncaught_exceptionv.uncaught_exception++;
  }
  {{{ makeThrow('ptr') }}}
},
// This exception will be caught twice, but while begin_catch runs twice,
// we early-exit from end_catch when the exception has been rethrown, so
// pop that here from the caught exceptions.
__cxa_rethrow__deps: ['__cxa_end_catch', '$EXCEPTIONS'], __cxa_rethrow: function() { var ptr = EXCEPTIONS.caught.pop(); if (!EXCEPTIONS.infos[ptr].rethrown) { // Only pop if the corresponding push was through rethrow_primary_exception EXCEPTIONS.caught.push(ptr) EXCEPTIONS.infos[ptr].rethrown = true; } #if EXCEPTION_DEBUG Module.printErr('Compiled code RE-throwing an exception, popped ' + [ptr, EXCEPTIONS.last, 'stack', EXCEPTIONS.caught]); #endif EXCEPTIONS.last = ptr; {{{ makeThrow('ptr') }}} }, llvm_eh_exception__deps: ['$EXCEPTIONS'], llvm_eh_exception: function() { return EXCEPTIONS.last; }, llvm_eh_selector__jsargs: true, llvm_eh_selector__deps: ['$EXCEPTIONS'], llvm_eh_selector: function(unused_exception_value, personality/*, varargs*/) { var type = EXCEPTIONS.last; for (var i = 2; i < arguments.length; i++) { if (arguments[i] == type) return type; } return 0; }, llvm_eh_typeid_for: function(type) { return type; }, __cxa_begin_catch__deps: ['_ZSt18uncaught_exceptionv', '$EXCEPTIONS'], __cxa_begin_catch: function(ptr) { var info = EXCEPTIONS.infos[ptr]; if (info && !info.caught) { info.caught = true; __ZSt18uncaught_exceptionv.uncaught_exception--; } if (info) info.rethrown = false; EXCEPTIONS.caught.push(ptr); #if EXCEPTION_DEBUG Module.printErr('cxa_begin_catch ' + [ptr, 'stack', EXCEPTIONS.caught]); #endif EXCEPTIONS.addRef(EXCEPTIONS.deAdjust(ptr)); return ptr; }, // We're done with a catch. Now, we can run the destructor if there is one // and free the exception. Note that if the dynCall on the destructor fails // due to calling apply on undefined, that means that the destructor is // an invalid index into the FUNCTION_TABLE, so something has gone wrong. __cxa_end_catch__deps: ['__cxa_free_exception', '$EXCEPTIONS'], __cxa_end_catch: function() { // Clear state flag. Module['setThrew'](0); // Call destructor if one is registered then clear it. 
var ptr = EXCEPTIONS.caught.pop(); #if EXCEPTION_DEBUG Module.printErr('cxa_end_catch popped ' + [ptr, EXCEPTIONS.last, 'stack', EXCEPTIONS.caught]); #endif if (ptr) { EXCEPTIONS.decRef(EXCEPTIONS.deAdjust(ptr)); EXCEPTIONS.last = 0; // XXX in decRef? } }, __cxa_get_exception_ptr: function(ptr) { #if EXCEPTION_DEBUG Module.printErr('cxa_get_exception_ptr ' + ptr); #endif // TODO: use info.adjusted? return ptr; }, _ZSt18uncaught_exceptionv: function() { // std::uncaught_exception() return !!__ZSt18uncaught_exceptionv.uncaught_exception; }, __cxa_uncaught_exception__deps: ['_ZSt18uncaught_exceptionv'], __cxa_uncaught_exception: function() { return !!__ZSt18uncaught_exceptionv.uncaught_exception; }, __cxa_call_unexpected: function(exception) { Module.printErr('Unexpected exception thrown, this is not properly supported - aborting'); ABORT = true; throw exception; }, __cxa_current_primary_exception: function() { var ret = EXCEPTIONS.caught[EXCEPTIONS.caught.length-1] || 0; if (ret) EXCEPTIONS.addRef(EXCEPTIONS.deAdjust(ret)); return ret; }, __cxa_rethrow_primary_exception__deps: ['__cxa_rethrow'], __cxa_rethrow_primary_exception: function(ptr) { if (!ptr) return; EXCEPTIONS.caught.push(ptr); EXCEPTIONS.infos[ptr].rethrown = true; ___cxa_rethrow(); }, terminate: '__cxa_call_unexpected', __gxx_personality_v0__deps: ['_ZSt18uncaught_exceptionv', '__cxa_find_matching_catch'], __gxx_personality_v0: function() { }, __gcc_personality_v0: function() { }, // Finds a suitable catch clause for when an exception is thrown. // In normal compilers, this functionality is handled by the C++ // 'personality' routine. This is passed a fairly complex structure // relating to the context of the exception and makes judgements // about how to handle it. Some of it is about matching a suitable // catch clause, and some of it is about unwinding. 
We already handle // unwinding using 'if' blocks around each function, so the remaining // functionality boils down to picking a suitable 'catch' block. // We'll do that here, instead, to keep things simpler. __cxa_find_matching_catch__deps: ['__resumeException', '$EXCEPTIONS'], __cxa_find_matching_catch: function() { var thrown = EXCEPTIONS.last; if (!thrown) { // just pass through the null ptr {{{ makeStructuralReturn([0, 0]) }}}; } var info = EXCEPTIONS.infos[thrown]; var throwntype = info.type; if (!throwntype) { // just pass through the thrown ptr {{{ makeStructuralReturn(['thrown', 0]) }}}; } var typeArray = Array.prototype.slice.call(arguments); var pointer = Module['___cxa_is_pointer_type'](throwntype); // can_catch receives a **, add indirection if (!___cxa_find_matching_catch.buffer) ___cxa_find_matching_catch.buffer = _malloc(4); #if EXCEPTION_DEBUG Module.print("can_catch on " + [thrown]); #endif {{{ makeSetValue('___cxa_find_matching_catch.buffer', '0', 'thrown', '*') }}}; thrown = ___cxa_find_matching_catch.buffer; // The different catch blocks are denoted by different types. // Due to inheritance, those types may not precisely match the // type of the thrown object. Find one which matches, and // return the type of the catch block which should be called. for (var i = 0; i < typeArray.length; i++) { if (typeArray[i] && Module['___cxa_can_catch'](typeArray[i], throwntype, thrown)) { thrown = {{{ makeGetValue('thrown', '0', '*') }}}; // undo indirection info.adjusted = thrown; #if EXCEPTION_DEBUG Module.print(" can_catch found " + [thrown, typeArray[i]]); #endif {{{ makeStructuralReturn(['thrown', 'typeArray[i]']) }}}; } } // Shouldn't happen unless we have bogus data in typeArray // or encounter a type for which emscripten doesn't have suitable // typeinfo defined. Best-efforts match just in case. 
thrown = {{{ makeGetValue('thrown', '0', '*') }}}; // undo indirection {{{ makeStructuralReturn(['thrown', 'throwntype']) }}}; }, __resumeException__deps: ['$EXCEPTIONS', function() { Functions.libraryFunctions['___resumeException'] = 1 }], // will be called directly from compiled code __resumeException: function(ptr) { #if EXCEPTION_DEBUG Module.print("Resuming exception " + [ptr, EXCEPTIONS.last]); #endif if (!EXCEPTIONS.last) { EXCEPTIONS.last = ptr; } {{{ makeThrow('ptr') }}} }, llvm_stacksave: function() { var self = _llvm_stacksave; if (!self.LLVM_SAVEDSTACKS) { self.LLVM_SAVEDSTACKS = []; } self.LLVM_SAVEDSTACKS.push(stackSave()); return self.LLVM_SAVEDSTACKS.length-1; }, llvm_stackrestore: function(p) { var self = _llvm_stacksave; var ret = self.LLVM_SAVEDSTACKS[p]; self.LLVM_SAVEDSTACKS.splice(p, 1); stackRestore(ret); }, __cxa_pure_virtual: function() { ABORT = true; throw 'Pure virtual function called!'; }, llvm_flt_rounds: function() { return -1; // 'indeterminable' for FLT_ROUNDS }, llvm_expect_i32__inline: function(val, expected) { return '(' + val + ')'; }, llvm_lifetime_start: function() {}, llvm_lifetime_end: function() {}, llvm_invariant_start: function() {}, llvm_invariant_end: function() {}, llvm_objectsize_i32: function() { return -1 }, // TODO: support this llvm_dbg_declare__inline: function() { throw 'llvm_debug_declare' }, // avoid warning llvm_bitreverse_i32__asm: true, llvm_bitreverse_i32__sig: 'ii', llvm_bitreverse_i32: function(x) { x = x|0; x = ((x & 0xaaaaaaaa) >>> 1) | ((x & 0x55555555) << 1); x = ((x & 0xcccccccc) >>> 2) | ((x & 0x33333333) << 2); x = ((x & 0xf0f0f0f0) >>> 4) | ((x & 0x0f0f0f0f) << 4); x = ((x & 0xff00ff00) >>> 8) | ((x & 0x00ff00ff) << 8); return (x >>> 16) | (x << 16); }, // llvm-nacl llvm_nacl_atomic_store_i32__inline: true, llvm_nacl_atomic_cmpxchg_i8__inline: true, llvm_nacl_atomic_cmpxchg_i16__inline: true, llvm_nacl_atomic_cmpxchg_i32__inline: true, // 
========================================================================== // llvm-mono integration // ========================================================================== llvm_mono_load_i8_p0i8: function(ptr) { return {{{ makeGetValue('ptr', 0, 'i8') }}}; }, llvm_mono_store_i8_p0i8: function(value, ptr) { {{{ makeSetValue('ptr', 0, 'value', 'i8') }}}; }, llvm_mono_load_i16_p0i16: function(ptr) { return {{{ makeGetValue('ptr', 0, 'i16') }}}; }, llvm_mono_store_i16_p0i16: function(value, ptr) { {{{ makeSetValue('ptr', 0, 'value', 'i16') }}}; }, llvm_mono_load_i32_p0i32: function(ptr) { return {{{ makeGetValue('ptr', 0, 'i32') }}}; }, llvm_mono_store_i32_p0i32: function(value, ptr) { {{{ makeSetValue('ptr', 0, 'value', 'i32') }}}; }, // ========================================================================== // math.h // ========================================================================== cos: 'Math_cos', cosf: 'Math_cos', cosl: 'Math_cos',
// libc math entries that alias JS Math builtins directly. JS numbers are
// IEEE doubles, so the float (`f`) and long-double (`l`) variants all share
// the double implementation.
tan: 'Math_tan',
tanf: 'Math_tan',
tanl: 'Math_tan',
acos: 'Math_acos',
acosf: 'Math_acos',
acosl: 'Math_acos',
asin: 'Math_asin',
asinf: 'Math_asin',
asinl: 'Math_asin',
atan: 'Math_atan',
atanf: 'Math_atan',
atanl: 'Math_atan',
atan2: 'Math_atan2',
atan2f: 'Math_atan2',
atan2l: 'Math_atan2',
exp: 'Math_exp',
expf: 'Math_exp',
expl: 'Math_exp',
log: 'Math_log',
logf: 'Math_log',
logl: 'Math_log',
sqrt: 'Math_sqrt',
sqrtf: 'Math_sqrt',
sqrtl: 'Math_sqrt',
fabs: 'Math_abs',
fabsf: 'Math_abs',
fabsl: 'Math_abs',
llvm_fabs_f32: 'Math_abs',
llvm_fabs_f64: 'Math_abs',
ceil: 'Math_ceil',
ceilf: 'Math_ceil',
ceill: 'Math_ceil',
floor: 'Math_floor',
floorf: 'Math_floor',
floorl: 'Math_floor',
pow: 'Math_pow',
powf: 'Math_pow',
powl: 'Math_pow',
// LLVM math intrinsics that map 1:1 onto Math builtins.
llvm_sqrt_f32: 'Math_sqrt',
llvm_sqrt_f64: 'Math_sqrt',
llvm_pow_f32: 'Math_pow',
llvm_pow_f64: 'Math_pow',
llvm_powi_f32: 'Math_pow',
llvm_powi_f64: 'Math_pow',
llvm_log_f32: 'Math_log',
llvm_log_f64: 'Math_log',
llvm_exp_f32: 'Math_exp',
llvm_exp_f64: 'Math_exp',
llvm_cos_f32: 'Math_cos',
llvm_cos_f64: 'Math_cos',
llvm_sin_f32: 'Math_sin',
llvm_sin_f64: 'Math_sin',
llvm_trunc_f32: 'Math_trunc',
llvm_trunc_f64: 'Math_trunc',
llvm_ceil_f32: 'Math_ceil',
llvm_ceil_f64: 'Math_ceil',
llvm_floor_f32: 'Math_floor',
llvm_floor_f64: 'Math_floor',
llvm_round_f32: 'Math_round',
llvm_round_f64: 'Math_round',
llvm_minnum_f32: 'Math_min',
llvm_minnum_f64: 'Math_min',
llvm_maxnum_f32: 'Math_max',
llvm_maxnum_f64: 'Math_max',
// Intrinsics without a direct Math builtin equivalent.
llvm_exp2_f32: function(x) {
  return Math.pow(2, x);
},
llvm_exp2_f64: 'llvm_exp2_f32',
llvm_log2_f32: function(x) {
  return Math.log(x) / Math.LN2; // TODO: Math.log2, when browser support is there
},
llvm_log2_f64: 'llvm_log2_f32',
llvm_log10_f32: function(x) {
  return Math.log(x) / Math.LN10; // TODO: Math.log10, when browser support is there
},
llvm_log10_f64: 'llvm_log10_f32',
// copysign: take x's magnitude with y's sign. The `1/y < 0` term catches
// negative zero, whose sign `y < 0` alone would miss.
llvm_copysign_f32: function(x, y) {
  return y < 0 || (y === 0 && 1/y < 0) ? -Math_abs(x) : Math_abs(x);
},
llvm_copysign_f64: function(x, y) {
  return y < 0 || (y === 0 && 1/y < 0) ? -Math_abs(x) : Math_abs(x);
},
// C round(): halfway cases round away from zero — unlike Math.round, which
// rounds half toward +Infinity. Hence the floor/ceil split on the sign.
// (asm.js-validated: the `+` coercions are required, do not simplify.)
round__asm: true,
round__sig: 'dd',
round: function(d) {
  d = +d;
  return d >= +0 ? +Math_floor(d + +0.5) : +Math_ceil(d - +0.5);
},
roundf__asm: true,
roundf__sig: 'dd',
roundf: function(f) {
  f = +f;
  return f >= +0 ? +Math_floor(f + +0.5) : +Math_ceil(f - +0.5); // TODO: use fround?
},
// True for values that are negative including negative zero (detected via
// 1/x === -Infinity, since -0 itself compares equal to 0).
_reallyNegative: function(x) {
  return x < 0 || (x === 0 && (1/x) === -Infinity);
},

// ==========================================================================
// dlfcn.h - Dynamic library loading
//
// Some limitations:
//
// * Minification on each file separately may not work, as they will
//   have different shortened names. You can in theory combine them, then
//   minify, then split... perhaps.
//
// * LLVM optimizations may fail. If the child wants to access a function
//   in the parent, LLVM opts may remove it from the parent when it is
//   being compiled. Not sure how to tell LLVM to not do so.
// ========================================================================== $DLFCN: { error: null, errorMsg: null, loadedLibs: {}, // handle -> [refcount, name, lib_object] loadedLibNames: {}, // name -> handle }, // void* dlopen(const char* filename, int flag); dlopen__deps: ['$DLFCN', '$FS', '$ENV'], dlopen__proxy: 'sync', dlopen__sig: 'iii', dlopen: function(filename, flag) { #if MAIN_MODULE == 0 abort("To use dlopen, you need to use Emscripten's linking support, see https://github.com/kripken/emscripten/wiki/Linking"); #endif // void *dlopen(const char *file, int mode); // http://pubs.opengroup.org/onlinepubs/009695399/functions/dlopen.html var searchpaths = []; if (filename === 0) { filename = '__self__'; } else { var strfilename = Pointer_stringify(filename); var isValidFile = function (filename) { var target = FS.findObject(filename); return target && !target.isFolder && !target.isDevice; }; if (isValidFile(strfilename)) { filename = strfilename; } else { if (ENV['LD_LIBRARY_PATH']) { searchpaths = ENV['LD_LIBRARY_PATH'].split(':'); } for (var ident in searchpaths) { var searchfile = PATH.join2(searchpaths[ident],strfilename); if (isValidFile(searchfile)) { filename = searchfile; break; } } } } if (DLFCN.loadedLibNames[filename]) { // Already loaded; increment ref count and return. 
var handle = DLFCN.loadedLibNames[filename]; DLFCN.loadedLibs[handle].refcount++; return handle; } if (filename === '__self__') { var handle = -1; var lib_module = Module; var cached_functions = {}; } else { var target = FS.findObject(filename); if (!target || target.isFolder || target.isDevice) { DLFCN.errorMsg = 'Could not find dynamic lib: ' + filename; return 0; } FS.forceLoadFile(target); var lib_module; try { #if BINARYEN // the shared library is a shared wasm library (see tools/shared.py WebAssembly.make_shared_library) var lib_data = FS.readFile(filename, { encoding: 'binary' }); if (!(lib_data instanceof Uint8Array)) lib_data = new Uint8Array(lib_data); //Module.printErr('libfile ' + filename + ' size: ' + lib_data.length); lib_module = loadWebAssemblyModule(lib_data); #else // the shared library is a JS file, which we eval var lib_data = FS.readFile(filename, { encoding: 'utf8' }); lib_module = eval(lib_data)( alignFunctionTables(), Module ); #endif } catch (e) { #if ASSERTIONS Module.printErr('Error in loading dynamic library: ' + e); #endif DLFCN.errorMsg = 'Could not evaluate dynamic lib: ' + filename + '\n' + e; return 0; } // Not all browsers support Object.keys(). var handle = 1; for (var key in DLFCN.loadedLibs) { if (DLFCN.loadedLibs.hasOwnProperty(key)) handle++; } // We don't care about RTLD_NOW and RTLD_LAZY. if (flag & 256) { // RTLD_GLOBAL for (var ident in lib_module) { if (lib_module.hasOwnProperty(ident)) { // When RTLD_GLOBAL is enable, the symbols defined by this shared object will be made // available for symbol resolution of subsequently loaded shared objects. // // We should copy the symbols (which include methods and variables) from SIDE_MODULE to MAIN_MODULE. // // Module of SIDE_MODULE has not only the symbols (which should be copied) // but also others (print*, asmGlobal*, FUNCTION_TABLE_**, NAMED_GLOBALS, and so on). // // When the symbol (which should be copied) is method, Module._* 's type becomes function. 
// When the symbol (which should be copied) is variable, Module._* 's type becomes number. // // Except for the symbol prefix (_), there is no difference in the symbols (which should be copied) and others. // So this just copies over compiled symbols (which start with _). if (ident[0] == '_') { Module[ident] = lib_module[ident]; } } } } var cached_functions = {}; } DLFCN.loadedLibs[handle] = { refcount: 1, name: filename, module: lib_module, cached_functions: cached_functions }; DLFCN.loadedLibNames[filename] = handle; return handle; }, // int dlclose(void* handle); dlclose__deps: ['$DLFCN'], dlclose__proxy: 'sync', dlclose__sig: 'ii', dlclose: function(handle) { // int dlclose(void *handle); // http://pubs.opengroup.org/onlinepubs/009695399/functions/dlclose.html if (!DLFCN.loadedLibs[handle]) { DLFCN.errorMsg = 'Tried to dlclose() unopened handle: ' + handle; return 1; } else { var lib_record = DLFCN.loadedLibs[handle]; if (--lib_record.refcount == 0) { if (lib_record.module.cleanups) { lib_record.module.cleanups.forEach(function(cleanup) { cleanup() }); } delete DLFCN.loadedLibNames[lib_record.name]; delete DLFCN.loadedLibs[handle]; } return 0; } }, // void* dlsym(void* handle, const char* symbol); dlsym__deps: ['$DLFCN'], dlsym__proxy: 'sync', dlsym__sig: 'iii', dlsym: function(handle, symbol) { // void *dlsym(void *restrict handle, const char *restrict name); // http://pubs.opengroup.org/onlinepubs/009695399/functions/dlsym.html symbol = Pointer_stringify(symbol); if (!DLFCN.loadedLibs[handle]) { DLFCN.errorMsg = 'Tried to dlsym() from an unopened handle: ' + handle; return 0; } else { var lib = DLFCN.loadedLibs[handle]; symbol = '_' + symbol; if (lib.cached_functions.hasOwnProperty(symbol)) { return lib.cached_functions[symbol]; } if (!lib.module.hasOwnProperty(symbol)) { DLFCN.errorMsg = ('Tried to lookup unknown symbol "' + symbol + '" in dynamic lib: ' + lib.name); return 0; } else { var result = lib.module[symbol]; if (typeof result == 'function') { 
result = addFunction(result); //Module.printErr('adding function dlsym result for ' + symbol + ' => ' + result); lib.cached_functions = result; } return result; } } }, // char* dlerror(void); dlerror__deps: ['$DLFCN'], dlerror__proxy: 'sync', dlerror__sig: 'i', dlerror: function() { // char *dlerror(void); // http://pubs.opengroup.org/onlinepubs/009695399/functions/dlerror.html if (DLFCN.errorMsg === null) { return 0; } else { if (DLFCN.error) _free(DLFCN.error); var msgArr = intArrayFromString(DLFCN.errorMsg); DLFCN.error = allocate(msgArr, 'i8', ALLOC_NORMAL); DLFCN.errorMsg = null; return DLFCN.error; } }, dladdr__proxy: 'sync', dladdr__sig: 'iii', dladdr: function(addr, info) { // report all function pointers as coming from this program itself XXX not really correct in any way var fname = allocate(intArrayFromString(Module['thisProgram'] || './this.program'), 'i8', ALLOC_NORMAL); // XXX leak {{{ makeSetValue('addr', 0, 'fname', 'i32') }}}; {{{ makeSetValue('addr', QUANTUM_SIZE, '0', 'i32') }}}; {{{ makeSetValue('addr', QUANTUM_SIZE*2, '0', 'i32') }}}; {{{ makeSetValue('addr', QUANTUM_SIZE*3, '0', 'i32') }}}; return 1; }, // ========================================================================== // pwd.h // ========================================================================== // TODO: Implement. 
// http://pubs.opengroup.org/onlinepubs/009695399/basedefs/pwd.h.html getpwuid: function(uid) { return 0; // NULL }, // ========================================================================== // time.h // ========================================================================== clock: function() { if (_clock.start === undefined) _clock.start = Date.now(); return ((Date.now() - _clock.start) * ({{{ cDefine('CLOCKS_PER_SEC') }}} / 1000))|0; }, time: function(ptr) { var ret = (Date.now()/1000)|0; if (ptr) { {{{ makeSetValue('ptr', 0, 'ret', 'i32') }}}; } return ret; }, difftime: function(time1, time0) { return time1 - time0; }, // Statically allocated time struct. #if USE_PTHREADS __tm_current: '; if (ENVIRONMENT_IS_PTHREAD) ___tm_current = PthreadWorkerInit.___tm_current; else PthreadWorkerInit.___tm_current = ___tm_current = allocate({{{ C_STRUCTS.tm.__size__ }}}, "i8", ALLOC_STATIC)', __tm_timezone: '; if (ENVIRONMENT_IS_PTHREAD) ___tm_timezone = PthreadWorkerInit.___tm_timezone; else PthreadWorkerInit.___tm_timezone = ___tm_timezone = allocate(intArrayFromString("GMT"), "i8", ALLOC_STATIC)', __tm_formatted: '; if (ENVIRONMENT_IS_PTHREAD) ___tm_formatted = PthreadWorkerInit.___tm_formatted; else PthreadWorkerInit.___tm_formatted = ___tm_formatted = allocate({{{ C_STRUCTS.tm.__size__ }}}, "i8", ALLOC_STATIC)', #else __tm_current: '{{{ makeStaticAlloc(C_STRUCTS.tm.__size__) }}}', // Statically allocated copy of the string "GMT" for gmtime() to point to __tm_timezone: 'allocate(intArrayFromString("GMT"), "i8", ALLOC_STATIC)', // Statically allocated time strings. 
__tm_formatted: '{{{ makeStaticAlloc(C_STRUCTS.tm.__size__) }}}',
#endif

mktime__deps: ['tzset'],
mktime: function(tmPtr) {
  // mktime(3): convert the broken-down *local* time at 'tmPtr' into a Unix
  // timestamp; normalizes tm_isdst/tm_wday/tm_yday as a side effect.
  _tzset();
  var date = new Date({{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_year, 'i32') }}} + 1900,
                      {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_mon, 'i32') }}},
                      {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_mday, 'i32') }}},
                      {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_hour, 'i32') }}},
                      {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_min, 'i32') }}},
                      {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_sec, 'i32') }}},
                      0);

  // There's an ambiguous hour when the time goes back; the tm_isdst field is
  // used to disambiguate it. Date() basically guesses, so we fix it up if it
  // guessed wrong, or fill in tm_isdst with the guess if it's -1.
  var dst = {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_isdst, 'i32') }}};
  var guessedOffset = date.getTimezoneOffset();
  var start = new Date(date.getFullYear(), 0, 1);
  // Offsets sampled in January and July reveal whether this zone observes
  // DST and which offset is the DST one.
  var summerOffset = new Date(2000, 6, 1).getTimezoneOffset();
  var winterOffset = start.getTimezoneOffset();
  var dstOffset = Math.min(winterOffset, summerOffset); // DST is in December in South
  if (dst < 0) {
    // Attention: some regions don't have DST at all.
    {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_isdst, 'Number(summerOffset != winterOffset && dstOffset == guessedOffset)', 'i32') }}};
  } else if ((dst > 0) != (dstOffset == guessedOffset)) {
    // Caller's tm_isdst disagrees with Date()'s guess: shift by the offset
    // delta so the requested DST interpretation wins.
    var nonDstOffset = Math.max(winterOffset, summerOffset);
    var trueOffset = dst > 0 ? dstOffset : nonDstOffset;
    // Don't try setMinutes(date.getMinutes() + ...) -- it's messed up.
date.setTime(date.getTime() + (trueOffset - guessedOffset)*60000);
  }

  // Normalize the derived day-of-week / day-of-year back into the struct.
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_wday, 'date.getDay()', 'i32') }}};
  var yday = ((date.getTime() - start.getTime()) / (1000 * 60 * 60 * 24))|0;
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_yday, 'yday', 'i32') }}};

  return (date.getTime() / 1000)|0;
},
// timelocal(3) is a legacy alias for mktime().
timelocal: 'mktime',

gmtime__deps: ['__tm_current', 'gmtime_r'],
gmtime: function(time) {
  // gmtime(3): non-reentrant variant; fills the shared static tm struct.
  return _gmtime_r(time, ___tm_current);
},

gmtime_r__deps: ['__tm_timezone'],
gmtime_r: function(time, tmPtr) {
  // gmtime_r(3): break the time_t at 'time' down into UTC fields at 'tmPtr'.
  var date = new Date({{{ makeGetValue('time', 0, 'i32') }}}*1000);
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_sec, 'date.getUTCSeconds()', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_min, 'date.getUTCMinutes()', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_hour, 'date.getUTCHours()', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_mday, 'date.getUTCDate()', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_mon, 'date.getUTCMonth()', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_year, 'date.getUTCFullYear()-1900', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_wday, 'date.getUTCDay()', 'i32') }}};
  // UTC never observes DST and has zero offset.
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_gmtoff, '0', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_isdst, '0', 'i32') }}};
  var start = Date.UTC(date.getUTCFullYear(), 0, 1, 0, 0, 0, 0);
  var yday = ((date.getTime() - start) / (1000 * 60 * 60 * 24))|0;
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_yday, 'yday', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_zone, '___tm_timezone', 'i32') }}};

  return tmPtr;
},
timegm__deps: ['tzset'],
timegm: function(tmPtr) {
  // timegm(3): inverse of gmtime_r — broken-down UTC fields to time_t.
  _tzset();
  var time = Date.UTC({{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_year, 'i32') }}} + 1900,
                      {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_mon, 'i32') }}},
                      {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_mday, 'i32') }}},
                      {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_hour, 'i32') }}},
                      {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_min, 'i32') }}},
                      {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_sec, 'i32') }}},
                      0);
  var date = new Date(time);

  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_wday, 'date.getUTCDay()', 'i32') }}};
  var start = Date.UTC(date.getUTCFullYear(), 0, 1, 0, 0, 0, 0);
  var yday = ((date.getTime() - start) / (1000 * 60 * 60 * 24))|0;
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_yday, 'yday', 'i32') }}};

  return (date.getTime() / 1000)|0;
},
localtime__deps: ['__tm_current', 'localtime_r'],
localtime: function(time) {
  // localtime(3): non-reentrant variant; fills the shared static tm struct.
  return _localtime_r(time, ___tm_current);
},

localtime_r__deps: ['__tm_timezone', 'tzset'],
localtime_r: function(time, tmPtr) {
  // localtime_r(3): break the time_t at 'time' down into *local* fields.
  _tzset();
  var date = new Date({{{ makeGetValue('time', 0, 'i32') }}}*1000);
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_sec, 'date.getSeconds()', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_min, 'date.getMinutes()', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_hour, 'date.getHours()', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_mday, 'date.getDate()', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_mon, 'date.getMonth()', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_year, 'date.getFullYear()-1900', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_wday, 'date.getDay()', 'i32') }}};

  var start = new Date(date.getFullYear(), 0, 1);
  var yday = ((date.getTime() - start.getTime()) / (1000 * 60 * 60 * 24))|0;
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_yday, 'yday', 'i32') }}};
  // JS getTimezoneOffset() is minutes west of UTC; tm_gmtoff is seconds east.
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_gmtoff, '-(date.getTimezoneOffset() * 60)', 'i32') }}};

  // Attention: DST is in December in South, and some regions don't have DST at all.
  var summerOffset = new Date(2000, 6, 1).getTimezoneOffset();
  var winterOffset = start.getTimezoneOffset();
  var dst = (summerOffset != winterOffset && date.getTimezoneOffset() == Math.min(winterOffset, summerOffset))|0;
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_isdst, 'dst', 'i32') }}};
  // Pick the standard or DST zone-name pointer filled in by tzset().
  var zonePtr = {{{ makeGetValue(makeGlobalUse('_tzname'), 'dst ? ' + Runtime.QUANTUM_SIZE + ' : 0', 'i32') }}};
  {{{ makeSetValue('tmPtr', C_STRUCTS.tm.tm_zone, 'zonePtr', 'i32') }}};

  return tmPtr;
},
asctime__deps: ['__tm_formatted', 'asctime_r'],
asctime: function(tmPtr) {
  // asctime(3): non-reentrant variant; formats into the shared static buffer.
  return _asctime_r(tmPtr, ___tm_formatted);
},

asctime_r__deps: ['__tm_formatted', 'mktime'],
asctime_r: function(tmPtr, buf) {
  // asctime_r(3): format the broken-down time at 'tmPtr' as the fixed
  // "Www Mmm dd hh:mm:ss yyyy\n" string into 'buf'.
  var date = {
    tm_sec: {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_sec, 'i32') }}},
    tm_min: {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_min, 'i32') }}},
    tm_hour: {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_hour, 'i32') }}},
    tm_mday: {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_mday, 'i32') }}},
    tm_mon: {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_mon, 'i32') }}},
    tm_year: {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_year, 'i32') }}},
    tm_wday: {{{ makeGetValue('tmPtr', C_STRUCTS.tm.tm_wday, 'i32') }}}
  };
  var days = [ "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" ];
  var months = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun",
                 "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" ];
  // NOTE(review): both branches of the tm_mday padding ternary below appear
  // to be a single space — the two-space pad for single-digit days was
  // probably lost in transit; confirm against upstream before relying on
  // column alignment.
  var s = days[date.tm_wday] + ' ' + months[date.tm_mon] +
      (date.tm_mday < 10 ? ' ' : ' ') + date.tm_mday +
      (date.tm_hour < 10 ? ' 0' : ' ') + date.tm_hour +
      (date.tm_min < 10 ? ':0' : ':') + date.tm_min +
      (date.tm_sec < 10 ? ':0' : ':') + date.tm_sec +
      ' ' + (1900 + date.tm_year) + "\n";
  // asctime_r is specced to behave in an undefined manner if the algorithm would attempt
  // to write out more than 26 bytes (including the null terminator).
  // See http://pubs.opengroup.org/onlinepubs/9699919799/functions/asctime.html
  // Our undefined behavior is to truncate the write to at most 26 bytes, including null terminator.
stringToUTF8(s, buf, 26);
  return buf;
},
ctime__deps: ['__tm_current', 'ctime_r'],
ctime: function(timer) {
  // ctime(3): non-reentrant variant; formats into the shared static buffer.
  return _ctime_r(timer, ___tm_current);
},

ctime_r__deps: ['localtime_r', 'asctime_r'],
ctime_r: function(time, buf) {
  // ctime_r(3) == asctime_r(localtime_r(time)); the intermediate tm struct
  // lives on the Emscripten stack and is released before returning.
  var stack = stackSave();
  var rv = _asctime_r(_localtime_r(time, stackAlloc({{{ C_STRUCTS.tm.__size__ }}})), buf);
  stackRestore(stack);
  return rv;
},

dysize: function(year) {
  // dysize(3): number of days in the given year (Gregorian leap rule).
  var leap = ((year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)));
  return leap ? 366 : 365;
},

// TODO: Initialize these to defaults on startup from system settings.
// Note: glibc has one fewer underscore for all of these. Also used in other related functions (timegm)
#if USE_PTHREADS
tzname: '; if (ENVIRONMENT_IS_PTHREAD) _tzname = PthreadWorkerInit._tzname; else PthreadWorkerInit._tzname = _tzname = allocate({{{ 2*Runtime.QUANTUM_SIZE }}}, "i32*", ALLOC_STATIC)',
daylight: '; if (ENVIRONMENT_IS_PTHREAD) _daylight = PthreadWorkerInit._daylight; else PthreadWorkerInit._daylight = _daylight = allocate(1, "i32*", ALLOC_STATIC)',
timezone: '; if (ENVIRONMENT_IS_PTHREAD) _timezone = PthreadWorkerInit._timezone; else PthreadWorkerInit._timezone = _timezone = allocate(1, "i32*", ALLOC_STATIC)',
#else
tzname: '{{{ makeStaticAlloc(2*Runtime.QUANTUM_SIZE) }}}',
daylight: '{{{ makeStaticAlloc(1) }}}',
timezone: '{{{ makeStaticAlloc(1) }}}',
#endif
tzset__deps: ['tzname', 'daylight', 'timezone'],
tzset__proxy: 'sync',
tzset__sig: 'v',
tzset: function() {
  // tzset(3): populate the global timezone/daylight/tzname variables from
  // the host environment. Runs at most once per program execution.
  // TODO: Use (malleable) environment variables instead of system settings.
  if (_tzset.called) return;
  _tzset.called = true;

  {{{ makeSetValue(makeGlobalUse('_timezone'), '0', '-(new Date()).getTimezoneOffset() * 60', 'i32') }}};

  // Sample January and July: differing offsets mean DST is observed.
  var winter = new Date(2000, 0, 1);
  var summer = new Date(2000, 6, 1);
  {{{ makeSetValue(makeGlobalUse('_daylight'), '0', 'Number(winter.getTimezoneOffset() != summer.getTimezoneOffset())', 'i32') }}};

  // Pull the zone abbreviation out of toTimeString(), e.g. "(PST)";
  // falls back to "GMT" when the engine omits it.
  function extractZone(date) {
    var match = date.toTimeString().match(/\(([A-Za-z ]+)\)$/);
    return match ?
match[1] : "GMT";
  };
  var winterName = extractZone(winter);
  var summerName = extractZone(summer);
  var winterNamePtr = allocate(intArrayFromString(winterName), 'i8', ALLOC_NORMAL);
  var summerNamePtr = allocate(intArrayFromString(summerName), 'i8', ALLOC_NORMAL);
  // tzname[0] is standard time, tzname[1] is DST; which sample is which
  // depends on the hemisphere.
  if (summer.getTimezoneOffset() < winter.getTimezoneOffset()) {
    // Northern hemisphere
    {{{ makeSetValue(makeGlobalUse('_tzname'), '0', 'winterNamePtr', 'i32') }}};
    {{{ makeSetValue(makeGlobalUse('_tzname'), Runtime.QUANTUM_SIZE, 'summerNamePtr', 'i32') }}};
  } else {
    {{{ makeSetValue(makeGlobalUse('_tzname'), '0', 'summerNamePtr', 'i32') }}};
    {{{ makeSetValue(makeGlobalUse('_tzname'), Runtime.QUANTUM_SIZE, 'winterNamePtr', 'i32') }}};
  }
},
stime__deps: ['$ERRNO_CODES', '__setErrNo'],
stime: function(when) {
  // stime(2): setting the system clock is not permitted here.
  ___setErrNo(ERRNO_CODES.EPERM);
  return -1;
},

__map_file__deps: ['$ERRNO_CODES', '__setErrNo'],
__map_file: function(pathname, size) {
  // File mapping is not supported; report "operation not permitted".
  ___setErrNo(ERRNO_CODES.EPERM);
  return -1;
},

// Days per month for regular and leap years (strftime/strptime helpers).
_MONTH_DAYS_REGULAR: [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
_MONTH_DAYS_LEAP: [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],

_isLeapYear: function(year) {
  // Gregorian leap-year rule.
  return year%4 === 0 && (year%100 !== 0 || year%400 === 0);
},

_arraySum: function(array, index) {
  // Sum of array[0..index] inclusive (an index of -1 yields 0).
  var sum = 0;
  for (var i = 0; i <= index; sum += array[i++]);
  return sum;
},

_addDays__deps: ['_isLeapYear', '_MONTH_DAYS_LEAP', '_MONTH_DAYS_REGULAR'],
_addDays: function(date, days) {
  // Return a new Date 'days' calendar days after 'date' (input untouched).
  var newDate = new Date(date.getTime());
  while(days > 0) {
    var leap = __isLeapYear(newDate.getFullYear());
    var currentMonth = newDate.getMonth();
    var daysInCurrentMonth = (leap ?
__MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR)[currentMonth]; if (days > daysInCurrentMonth-newDate.getDate()) { // we spill over to next month days -= (daysInCurrentMonth-newDate.getDate()+1); newDate.setDate(1); if (currentMonth < 11) { newDate.setMonth(currentMonth+1) } else { newDate.setMonth(0); newDate.setFullYear(newDate.getFullYear()+1); } } else { // we stay in current month newDate.setDate(newDate.getDate()+days); return newDate; } } return newDate; }, strftime__deps: ['_isLeapYear', '_arraySum', '_addDays', '_MONTH_DAYS_REGULAR', '_MONTH_DAYS_LEAP'], strftime: function(s, maxsize, format, tm) { // size_t strftime(char *restrict s, size_t maxsize, const char *restrict format, const struct tm *restrict timeptr); // http://pubs.opengroup.org/onlinepubs/009695399/functions/strftime.html var tm_zone = {{{ makeGetValue('tm', C_STRUCTS.tm.tm_zone, 'i32') }}}; var date = { tm_sec: {{{ makeGetValue('tm', C_STRUCTS.tm.tm_sec, 'i32') }}}, tm_min: {{{ makeGetValue('tm', C_STRUCTS.tm.tm_min, 'i32') }}}, tm_hour: {{{ makeGetValue('tm', C_STRUCTS.tm.tm_hour, 'i32') }}}, tm_mday: {{{ makeGetValue('tm', C_STRUCTS.tm.tm_mday, 'i32') }}}, tm_mon: {{{ makeGetValue('tm', C_STRUCTS.tm.tm_mon, 'i32') }}}, tm_year: {{{ makeGetValue('tm', C_STRUCTS.tm.tm_year, 'i32') }}}, tm_wday: {{{ makeGetValue('tm', C_STRUCTS.tm.tm_wday, 'i32') }}}, tm_yday: {{{ makeGetValue('tm', C_STRUCTS.tm.tm_yday, 'i32') }}}, tm_isdst: {{{ makeGetValue('tm', C_STRUCTS.tm.tm_isdst, 'i32') }}}, tm_gmtoff: {{{ makeGetValue('tm', C_STRUCTS.tm.tm_gmtoff, 'i32') }}}, tm_zone: tm_zone ? Pointer_stringify(tm_zone) : '' }; var pattern = Pointer_stringify(format); // expand format var EXPANSION_RULES_1 = { '%c': '%a %b %d %H:%M:%S %Y', // Replaced by the locale's appropriate date and time representation - e.g., Mon Aug 3 14:02:01 2013 '%D': '%m/%d/%y', // Equivalent to %m / %d / %y '%F': '%Y-%m-%d', // Equivalent to %Y - %m - %d '%h': '%b', // Equivalent to %b '%r': '%I:%M:%S %p', // Replaced by the time in a.m. 
and p.m. notation '%R': '%H:%M', // Replaced by the time in 24-hour notation '%T': '%H:%M:%S', // Replaced by the time '%x': '%m/%d/%y', // Replaced by the locale's appropriate date representation '%X': '%H:%M:%S' // Replaced by the locale's appropriate date representation }; for (var rule in EXPANSION_RULES_1) { pattern = pattern.replace(new RegExp(rule, 'g'), EXPANSION_RULES_1[rule]); } var WEEKDAYS = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']; var MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']; function leadingSomething(value, digits, character) { var str = typeof value === 'number' ? value.toString() : (value || ''); while (str.length < digits) { str = character[0]+str; } return str; }; function leadingNulls(value, digits) { return leadingSomething(value, digits, '0'); }; function compareByDay(date1, date2) { function sgn(value) { return value < 0 ? -1 : (value > 0 ? 
1 : 0); }; var compare; if ((compare = sgn(date1.getFullYear()-date2.getFullYear())) === 0) { if ((compare = sgn(date1.getMonth()-date2.getMonth())) === 0) { compare = sgn(date1.getDate()-date2.getDate()); } } return compare; }; function getFirstWeekStartDate(janFourth) { switch (janFourth.getDay()) { case 0: // Sunday return new Date(janFourth.getFullYear()-1, 11, 29); case 1: // Monday return janFourth; case 2: // Tuesday return new Date(janFourth.getFullYear(), 0, 3); case 3: // Wednesday return new Date(janFourth.getFullYear(), 0, 2); case 4: // Thursday return new Date(janFourth.getFullYear(), 0, 1); case 5: // Friday return new Date(janFourth.getFullYear()-1, 11, 31); case 6: // Saturday return new Date(janFourth.getFullYear()-1, 11, 30); } }; function getWeekBasedYear(date) { var thisDate = __addDays(new Date(date.tm_year+1900, 0, 1), date.tm_yday); var janFourthThisYear = new Date(thisDate.getFullYear(), 0, 4); var janFourthNextYear = new Date(thisDate.getFullYear()+1, 0, 4); var firstWeekStartThisYear = getFirstWeekStartDate(janFourthThisYear); var firstWeekStartNextYear = getFirstWeekStartDate(janFourthNextYear); if (compareByDay(firstWeekStartThisYear, thisDate) <= 0) { // this date is after the start of the first week of this year if (compareByDay(firstWeekStartNextYear, thisDate) <= 0) { return thisDate.getFullYear()+1; } else { return thisDate.getFullYear(); } } else { return thisDate.getFullYear()-1; } }; var EXPANSION_RULES_2 = { '%a': function(date) { return WEEKDAYS[date.tm_wday].substring(0,3); }, '%A': function(date) { return WEEKDAYS[date.tm_wday]; }, '%b': function(date) { return MONTHS[date.tm_mon].substring(0,3); }, '%B': function(date) { return MONTHS[date.tm_mon]; }, '%C': function(date) { var year = date.tm_year+1900; return leadingNulls((year/100)|0,2); }, '%d': function(date) { return leadingNulls(date.tm_mday, 2); }, '%e': function(date) { return leadingSomething(date.tm_mday, 2, ' '); }, '%g': function(date) { // %g, %G, and %V give 
values according to the ISO 8601:2000 standard week-based year. // In this system, weeks begin on a Monday and week 1 of the year is the week that includes // January 4th, which is also the week that includes the first Thursday of the year, and // is also the first week that contains at least four days in the year. // If the first Monday of January is the 2nd, 3rd, or 4th, the preceding days are part of // the last week of the preceding year; thus, for Saturday 2nd January 1999, // %G is replaced by 1998 and %V is replaced by 53. If December 29th, 30th, // or 31st is a Monday, it and any following days are part of week 1 of the following year. // Thus, for Tuesday 30th December 1997, %G is replaced by 1998 and %V is replaced by 01. return getWeekBasedYear(date).toString().substring(2); }, '%G': function(date) { return getWeekBasedYear(date); }, '%H': function(date) { return leadingNulls(date.tm_hour, 2); }, '%I': function(date) { var twelveHour = date.tm_hour; if (twelveHour == 0) twelveHour = 12; else if (twelveHour > 12) twelveHour -= 12; return leadingNulls(twelveHour, 2); }, '%j': function(date) { // Day of the year (001-366) return leadingNulls(date.tm_mday+__arraySum(__isLeapYear(date.tm_year+1900) ? __MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR, date.tm_mon-1), 3); }, '%m': function(date) { return leadingNulls(date.tm_mon+1, 2); }, '%M': function(date) { return leadingNulls(date.tm_min, 2); }, '%n': function() { return '\n'; }, '%p': function(date) { if (date.tm_hour >= 0 && date.tm_hour < 12) { return 'AM'; } else { return 'PM'; } }, '%S': function(date) { return leadingNulls(date.tm_sec, 2); }, '%t': function() { return '\t'; }, '%u': function(date) { var day = new Date(date.tm_year+1900, date.tm_mon+1, date.tm_mday, 0, 0, 0, 0); return day.getDay() || 7; }, '%U': function(date) { // Replaced by the week number of the year as a decimal number [00,53]. // The first Sunday of January is the first day of week 1; // days in the new year before this are in week 0. 
[ tm_year, tm_wday, tm_yday] var janFirst = new Date(date.tm_year+1900, 0, 1); var firstSunday = janFirst.getDay() === 0 ? janFirst : __addDays(janFirst, 7-janFirst.getDay()); var endDate = new Date(date.tm_year+1900, date.tm_mon, date.tm_mday); // is target date after the first Sunday? if (compareByDay(firstSunday, endDate) < 0) { // calculate difference in days between first Sunday and endDate var februaryFirstUntilEndMonth = __arraySum(__isLeapYear(endDate.getFullYear()) ? __MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR, endDate.getMonth()-1)-31; var firstSundayUntilEndJanuary = 31-firstSunday.getDate(); var days = firstSundayUntilEndJanuary+februaryFirstUntilEndMonth+endDate.getDate(); return leadingNulls(Math.ceil(days/7), 2); } return compareByDay(firstSunday, janFirst) === 0 ? '01': '00'; }, '%V': function(date) { // Replaced by the week number of the year (Monday as the first day of the week) // as a decimal number [01,53]. If the week containing 1 January has four // or more days in the new year, then it is considered week 1. // Otherwise, it is the last week of the previous year, and the next week is week 1. // Both January 4th and the first Thursday of January are always in week 1. 
[ tm_year, tm_wday, tm_yday] var janFourthThisYear = new Date(date.tm_year+1900, 0, 4); var janFourthNextYear = new Date(date.tm_year+1901, 0, 4); var firstWeekStartThisYear = getFirstWeekStartDate(janFourthThisYear); var firstWeekStartNextYear = getFirstWeekStartDate(janFourthNextYear); var endDate = __addDays(new Date(date.tm_year+1900, 0, 1), date.tm_yday); if (compareByDay(endDate, firstWeekStartThisYear) < 0) { // if given date is before this years first week, then it belongs to the 53rd week of last year return '53'; } if (compareByDay(firstWeekStartNextYear, endDate) <= 0) { // if given date is after next years first week, then it belongs to the 01th week of next year return '01'; } // given date is in between CW 01..53 of this calendar year var daysDifference; if (firstWeekStartThisYear.getFullYear() < date.tm_year+1900) { // first CW of this year starts last year daysDifference = date.tm_yday+32-firstWeekStartThisYear.getDate() } else { // first CW of this year starts this year daysDifference = date.tm_yday+1-firstWeekStartThisYear.getDate(); } return leadingNulls(Math.ceil(daysDifference/7), 2); }, '%w': function(date) { var day = new Date(date.tm_year+1900, date.tm_mon+1, date.tm_mday, 0, 0, 0, 0); return day.getDay(); }, '%W': function(date) { // Replaced by the week number of the year as a decimal number [00,53]. // The first Monday of January is the first day of week 1; // days in the new year before this are in week 0. [ tm_year, tm_wday, tm_yday] var janFirst = new Date(date.tm_year, 0, 1); var firstMonday = janFirst.getDay() === 1 ? janFirst : __addDays(janFirst, janFirst.getDay() === 0 ? 1 : 7-janFirst.getDay()+1); var endDate = new Date(date.tm_year+1900, date.tm_mon, date.tm_mday); // is target date after the first Monday? if (compareByDay(firstMonday, endDate) < 0) { var februaryFirstUntilEndMonth = __arraySum(__isLeapYear(endDate.getFullYear()) ? 
__MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR, endDate.getMonth()-1)-31; var firstMondayUntilEndJanuary = 31-firstMonday.getDate(); var days = firstMondayUntilEndJanuary+februaryFirstUntilEndMonth+endDate.getDate(); return leadingNulls(Math.ceil(days/7), 2); } return compareByDay(firstMonday, janFirst) === 0 ? '01': '00'; }, '%y': function(date) { // Replaced by the last two digits of the year as a decimal number [00,99]. [ tm_year] return (date.tm_year+1900).toString().substring(2); }, '%Y': function(date) { // Replaced by the year as a decimal number (for example, 1997). [ tm_year] return date.tm_year+1900; }, '%z': function(date) { // Replaced by the offset from UTC in the ISO 8601:2000 standard format ( +hhmm or -hhmm ). // For example, "-0430" means 4 hours 30 minutes behind UTC (west of Greenwich). var off = date.tm_gmtoff; var ahead = off >= 0; off = Math.abs(off) / 60; // convert from minutes into hhmm format (which means 60 minutes = 100 units) off = (off / 60)*100 + (off % 60); return (ahead ? 
'+' : '-') + String("0000" + off).slice(-4); }, '%Z': function(date) { return date.tm_zone; }, '%%': function() { return '%'; } }; for (var rule in EXPANSION_RULES_2) { if (pattern.indexOf(rule) >= 0) { pattern = pattern.replace(new RegExp(rule, 'g'), EXPANSION_RULES_2[rule](date)); } } var bytes = intArrayFromString(pattern, false); if (bytes.length > maxsize) { return 0; } writeArrayToMemory(bytes, s); return bytes.length-1; }, strftime_l__deps: ['strftime'], strftime_l: function(s, maxsize, format, tm) { return _strftime(s, maxsize, format, tm); // no locale support yet }, strptime__deps: ['_isLeapYear', '_arraySum', '_addDays', '_MONTH_DAYS_REGULAR', '_MONTH_DAYS_LEAP'], strptime: function(buf, format, tm) { // char *strptime(const char *restrict buf, const char *restrict format, struct tm *restrict tm); // http://pubs.opengroup.org/onlinepubs/009695399/functions/strptime.html var pattern = Pointer_stringify(format); // escape special characters // TODO: not sure we really need to escape all of these in JS regexps var SPECIAL_CHARS = '\\!@#$^&*()+=-[]/{}|:<>?,.'; for (var i=0, ii=SPECIAL_CHARS.length; i<ii; ++i) { pattern = pattern.replace(new RegExp('\\'+SPECIAL_CHARS[i], 'g'), '\\'+SPECIAL_CHARS[i]); } // reduce number of matchers var EQUIVALENT_MATCHERS = { '%A': '%a', '%B': '%b', '%c': '%x\\s+%X', '%D': '%m\\/%d\\/%y', '%e': '%d', '%h': '%b', '%R': '%H\\:%M', '%r': '%I\\:%M\\:%S\\s%p', '%T': '%H\\:%M\\:%S', '%x': '%m\\/%d\\/(?:%y|%Y)', '%X': '%H\\:%M\\:%S' }; for (var matcher in EQUIVALENT_MATCHERS) { pattern = pattern.replace(matcher, EQUIVALENT_MATCHERS[matcher]); } // TODO: take care of locale var DATE_PATTERNS = { /* weeday name */ '%a': '(?:Sun(?:day)?)|(?:Mon(?:day)?)|(?:Tue(?:sday)?)|(?:Wed(?:nesday)?)|(?:Thu(?:rsday)?)|(?:Fri(?:day)?)|(?:Sat(?:urday)?)', /* month name */ '%b': 
'(?:Jan(?:uary)?)|(?:Feb(?:ruary)?)|(?:Mar(?:ch)?)|(?:Apr(?:il)?)|May|(?:Jun(?:e)?)|(?:Jul(?:y)?)|(?:Aug(?:ust)?)|(?:Sep(?:tember)?)|(?:Oct(?:ober)?)|(?:Nov(?:ember)?)|(?:Dec(?:ember)?)', /* century */ '%C': '\\d\\d', /* day of month */ '%d': '0[1-9]|[1-9](?!\\d)|1\\d|2\\d|30|31', /* hour (24hr) */ '%H': '\\d(?!\\d)|[0,1]\\d|20|21|22|23', /* hour (12hr) */ '%I': '\\d(?!\\d)|0\\d|10|11|12', /* day of year */ '%j': '00[1-9]|0?[1-9](?!\\d)|0?[1-9]\\d(?!\\d)|[1,2]\\d\\d|3[0-6]\\d', /* month */ '%m': '0[1-9]|[1-9](?!\\d)|10|11|12', /* minutes */ '%M': '0\\d|\\d(?!\\d)|[1-5]\\d', /* whitespace */ '%n': '\\s', /* AM/PM */ '%p': 'AM|am|PM|pm|A\\.M\\.|a\\.m\\.|P\\.M\\.|p\\.m\\.', /* seconds */ '%S': '0\\d|\\d(?!\\d)|[1-5]\\d|60', /* week number */ '%U': '0\\d|\\d(?!\\d)|[1-4]\\d|50|51|52|53', /* week number */ '%W': '0\\d|\\d(?!\\d)|[1-4]\\d|50|51|52|53', /* weekday number */ '%w': '[0-6]', /* 2-digit year */ '%y': '\\d\\d', /* 4-digit year */ '%Y': '\\d\\d\\d\\d', /* % */ '%%': '%', /* whitespace */ '%t': '\\s', }; var MONTH_NUMBERS = {JAN: 0, FEB: 1, MAR: 2, APR: 3, MAY: 4, JUN: 5, JUL: 6, AUG: 7, SEP: 8, OCT: 9, NOV: 10, DEC: 11}; var DAY_NUMBERS_SUN_FIRST = {SUN: 0, MON: 1, TUE: 2, WED: 3, THU: 4, FRI: 5, SAT: 6}; var DAY_NUMBERS_MON_FIRST = {MON: 0, TUE: 1, WED: 2, THU: 3, FRI: 4, SAT: 5, SUN: 6}; for (var datePattern in DATE_PATTERNS) { pattern = pattern.replace(datePattern, '('+datePattern+DATE_PATTERNS[datePattern]+')'); } // take care of capturing groups var capture = []; for (var i=pattern.indexOf('%'); i>=0; i=pattern.indexOf('%')) { capture.push(pattern[i+1]); pattern = pattern.replace(new RegExp('\\%'+pattern[i+1], 'g'), ''); } var matches = new RegExp('^'+pattern, "i").exec(Pointer_stringify(buf)) // Module['print'](Pointer_stringify(buf)+ ' is matched by '+((new RegExp('^'+pattern)).source)+' into: '+JSON.stringify(matches)); function initDate() { function fixup(value, min, max) { return (typeof value !== 'number' || isNaN(value)) ? min : (value>=min ? 
(value<=max ? value: max): min); }; return { year: fixup({{{ makeGetValue('tm', C_STRUCTS.tm.tm_year, 'i32', 0, 0, 1) }}} + 1900 , 1970, 9999), month: fixup({{{ makeGetValue('tm', C_STRUCTS.tm.tm_mon, 'i32', 0, 0, 1) }}}, 0, 11), day: fixup({{{ makeGetValue('tm', C_STRUCTS.tm.tm_mday, 'i32', 0, 0, 1) }}}, 1, 31), hour: fixup({{{ makeGetValue('tm', C_STRUCTS.tm.tm_hour, 'i32', 0, 0, 1) }}}, 0, 23), min: fixup({{{ makeGetValue('tm', C_STRUCTS.tm.tm_min, 'i32', 0, 0, 1) }}}, 0, 59), sec: fixup({{{ makeGetValue('tm', C_STRUCTS.tm.tm_sec, 'i32', 0, 0, 1) }}}, 0, 59) }; }; if (matches) { var date = initDate(); var value; function getMatch(symbol) { var pos = capture.indexOf(symbol); // check if symbol appears in regexp if (pos >= 0) { // return matched value or null (falsy!) for non-matches return matches[pos+1]; } return; } // seconds if ((value=getMatch('S'))) { date.sec = parseInt(value); } // minutes if ((value=getMatch('M'))) { date.min = parseInt(value); } // hours if ((value=getMatch('H'))) { // 24h clock date.hour = parseInt(value); } else if ((value = getMatch('I'))) { // AM/PM clock var hour = parseInt(value); if ((value=getMatch('p'))) { hour += value.toUpperCase()[0] === 'P' ? 12 : 0; } date.hour = hour; } // year if ((value=getMatch('Y'))) { // parse from four-digit year date.year = parseInt(value); } else if ((value=getMatch('y'))) { // parse from two-digit year... var year = parseInt(value); if ((value=getMatch('C'))) { // ...and century year += parseInt(value)*100; } else { // ...and rule-of-thumb year += year<69 ? 
2000 : 1900; } date.year = year; } // month if ((value=getMatch('m'))) { // parse from month number date.month = parseInt(value)-1; } else if ((value=getMatch('b'))) { // parse from month name date.month = MONTH_NUMBERS[value.substring(0,3).toUpperCase()] || 0; // TODO: derive month from day in year+year, week number+day of week+year } // day if ((value=getMatch('d'))) { // get day of month directly date.day = parseInt(value); } else if ((value=getMatch('j'))) { // get day of month from day of year ... var day = parseInt(value); var leapYear = __isLeapYear(date.year); for (var month=0; month<12; ++month) { var daysUntilMonth = __arraySum(leapYear ? __MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR, month-1); if (day<=daysUntilMonth+(leapYear ? __MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR)[month]) { date.day = day-daysUntilMonth; } } } else if ((value=getMatch('a'))) { // get day of month from weekday ... var weekDay = value.substring(0,3).toUpperCase(); if ((value=getMatch('U'))) { // ... and week number (Sunday being first day of week) // Week number of the year (Sunday as the first day of the week) as a decimal number [00,53]. // All days in a new year preceding the first Sunday are considered to be in week 0. var weekDayNumber = DAY_NUMBERS_SUN_FIRST[weekDay]; var weekNumber = parseInt(value); // January 1st var janFirst = new Date(date.year, 0, 1); var endDate; if (janFirst.getDay() === 0) { // Jan 1st is a Sunday, and, hence in the 1st CW endDate = __addDays(janFirst, weekDayNumber+7*(weekNumber-1)); } else { // Jan 1st is not a Sunday, and, hence still in the 0th CW endDate = __addDays(janFirst, 7-janFirst.getDay()+weekDayNumber+7*(weekNumber-1)); } date.day = endDate.getDate(); date.month = endDate.getMonth(); } else if ((value=getMatch('W'))) { // ... and week number (Monday being first day of week) // Week number of the year (Monday as the first day of the week) as a decimal number [00,53]. 
// All days in a new year preceding the first Monday are considered to be in week 0. var weekDayNumber = DAY_NUMBERS_MON_FIRST[weekDay]; var weekNumber = parseInt(value); // January 1st var janFirst = new Date(date.year, 0, 1); var endDate; if (janFirst.getDay()===1) { // Jan 1st is a Monday, and, hence in the 1st CW endDate = __addDays(janFirst, weekDayNumber+7*(weekNumber-1)); } else { // Jan 1st is not a Monday, and, hence still in the 0th CW endDate = __addDays(janFirst, 7-janFirst.getDay()+1+weekDayNumber+7*(weekNumber-1)); } date.day = endDate.getDate(); date.month = endDate.getMonth(); } } /* tm_sec int seconds after the minute 0-61* tm_min int minutes after the hour 0-59 tm_hour int hours since midnight 0-23 tm_mday int day of the month 1-31 tm_mon int months since January 0-11 tm_year int years since 1900 tm_wday int days since Sunday 0-6 tm_yday int days since January 1 0-365 tm_isdst int Daylight Saving Time flag */ var fullDate = new Date(date.year, date.month, date.day, date.hour, date.min, date.sec, 0); {{{ makeSetValue('tm', C_STRUCTS.tm.tm_sec, 'fullDate.getSeconds()', 'i32') }}}; {{{ makeSetValue('tm', C_STRUCTS.tm.tm_min, 'fullDate.getMinutes()', 'i32') }}}; {{{ makeSetValue('tm', C_STRUCTS.tm.tm_hour, 'fullDate.getHours()', 'i32') }}}; {{{ makeSetValue('tm', C_STRUCTS.tm.tm_mday, 'fullDate.getDate()', 'i32') }}}; {{{ makeSetValue('tm', C_STRUCTS.tm.tm_mon, 'fullDate.getMonth()', 'i32') }}}; {{{ makeSetValue('tm', C_STRUCTS.tm.tm_year, 'fullDate.getFullYear()-1900', 'i32') }}}; {{{ makeSetValue('tm', C_STRUCTS.tm.tm_wday, 'fullDate.getDay()', 'i32') }}}; {{{ makeSetValue('tm', C_STRUCTS.tm.tm_yday, '__arraySum(__isLeapYear(fullDate.getFullYear()) ? 
__MONTH_DAYS_LEAP : __MONTH_DAYS_REGULAR, fullDate.getMonth()-1)+fullDate.getDate()-1', 'i32') }}};
      {{{ makeSetValue('tm', C_STRUCTS.tm.tm_isdst, '0', 'i32') }}};

      // we need to convert the matched sequence into an integer array to take care of UTF-8 characters > 0x7F
      // TODO: not sure that intArrayFromString handles all unicode characters correctly
      return buf+intArrayFromString(matches[0]).length-1;
    }

    return 0;
  },
  strptime_l__deps: ['strptime'],
  // Locale-aware strptime variant; no locale support yet, so it simply
  // forwards to strptime() and ignores the locale argument.
  strptime_l: function(buf, format, tm) {
    return _strptime(buf, format, tm); // no locale support yet
  },

  getdate: function(string) {
    // struct tm *getdate(const char *string);
    // http://pubs.opengroup.org/onlinepubs/009695399/functions/getdate.html
    // TODO: Implement.
    return 0;
  },

  // ==========================================================================
  // sys/time.h
  // ==========================================================================

  clock_gettime__deps: ['emscripten_get_now', 'emscripten_get_now_is_monotonic', '$ERRNO_CODES', '__setErrNo'],
  clock_gettime: function(clk_id, tp) {
    // int clock_gettime(clockid_t clk_id, struct timespec *tp);
    // CLOCK_REALTIME is served from Date.now(); CLOCK_MONOTONIC from
    // emscripten_get_now() when the environment guarantees monotonicity.
    // Any other clock id fails with EINVAL.
    var now;
    if (clk_id === {{{ cDefine('CLOCK_REALTIME') }}}) {
      now = Date.now();
    } else if (clk_id === {{{ cDefine('CLOCK_MONOTONIC') }}} && _emscripten_get_now_is_monotonic()) {
      now = _emscripten_get_now();
    } else {
      ___setErrNo(ERRNO_CODES.EINVAL);
      return -1;
    }
    // `now` is in milliseconds; split it into the timespec's sec/nsec fields.
    {{{ makeSetValue('tp', C_STRUCTS.timespec.tv_sec, '(now/1000)|0', 'i32') }}}; // seconds
    {{{ makeSetValue('tp', C_STRUCTS.timespec.tv_nsec, '((now % 1000)*1000*1000)|0', 'i32') }}}; // nanoseconds
    return 0;
  },
  __clock_gettime: 'clock_gettime', // musl internal alias
  clock_settime__deps: ['$ERRNO_CODES', '__setErrNo'],
  clock_settime: function(clk_id, tp) {
    // int clock_settime(clockid_t clk_id, const struct timespec *tp);
    // Nothing.
    // Setting the clock is impossible in a JS environment: report EPERM for
    // the realtime clock and EINVAL for everything else, per POSIX.
    ___setErrNo(clk_id === {{{ cDefine('CLOCK_REALTIME') }}} ? ERRNO_CODES.EPERM : ERRNO_CODES.EINVAL);
    return -1;
  },
  clock_getres__deps: ['emscripten_get_now_res', 'emscripten_get_now_is_monotonic', '$ERRNO_CODES', '__setErrNo'],
  clock_getres: function(clk_id, res) {
    // int clock_getres(clockid_t clk_id, struct timespec *res);
    var nsec;
    if (clk_id === {{{ cDefine('CLOCK_REALTIME') }}}) {
      nsec = 1000 * 1000; // educated guess that it's milliseconds
    } else if (clk_id === {{{ cDefine('CLOCK_MONOTONIC') }}} && _emscripten_get_now_is_monotonic()) {
      nsec = _emscripten_get_now_res();
    } else {
      ___setErrNo(ERRNO_CODES.EINVAL);
      return -1;
    }
    {{{ makeSetValue('res', C_STRUCTS.timespec.tv_sec, '(nsec/1000000000)|0', 'i32') }}};
    {{{ makeSetValue('res', C_STRUCTS.timespec.tv_nsec, 'nsec', 'i32') }}} // resolution is nanoseconds
    return 0;
  },
  clock_getcpuclockid__deps: ['$PROCINFO'],
  clock_getcpuclockid: function(pid, clk_id) {
    // Note: per POSIX, clock_getcpuclockid returns the error number directly
    // instead of setting errno. Only the current (single) process is valid.
    if (pid < 0) return ERRNO_CODES.ESRCH;
    if (pid !== 0 && pid !== PROCINFO.pid) return ERRNO_CODES.ENOSYS;
    if (clk_id) {{{ makeSetValue('clk_id', 0, 2/*CLOCK_PROCESS_CPUTIME_ID*/, 'i32') }}};
    return 0;
  },
  // http://pubs.opengroup.org/onlinepubs/000095399/basedefs/sys/time.h.html
  gettimeofday: function(ptr) {
    // Millisecond-resolution wall clock split into the timeval's sec/usec.
    var now = Date.now();
    {{{ makeSetValue('ptr', C_STRUCTS.timeval.tv_sec, '(now/1000)|0', 'i32') }}}; // seconds
    {{{ makeSetValue('ptr', C_STRUCTS.timeval.tv_usec, '((now % 1000)*1000)|0', 'i32') }}}; // microseconds
    return 0;
  },

  // ==========================================================================
  // sys/timeb.h
  // ==========================================================================

  ftime: function(p) {
    var millis = Date.now();
    {{{ makeSetValue('p', C_STRUCTS.timeb.time, '(millis/1000)|0', 'i32') }}};
    {{{ makeSetValue('p', C_STRUCTS.timeb.millitm, 'millis % 1000', 'i16') }}};
    {{{ makeSetValue('p', C_STRUCTS.timeb.timezone, '0', 'i16') }}}; // Obsolete field
    {{{ makeSetValue('p', C_STRUCTS.timeb.dstflag, '0', 'i16') }}}; // Obsolete field
    return 0;
  },

  //
// ==========================================================================
  // sys/times.h
  // ==========================================================================

  times__deps: ['memset'],
  times: function(buffer) {
    // clock_t times(struct tms *buffer);
    // http://pubs.opengroup.org/onlinepubs/009695399/functions/times.html
    // NOTE: This is fake, since we can't calculate real CPU time usage in JS.
    if (buffer !== 0) {
      _memset(buffer, 0, {{{ C_STRUCTS.tms.__size__ }}});
    }
    return 0;
  },

  // ==========================================================================
  // sys/types.h
  // ==========================================================================
  // http://www.kernel.org/doc/man-pages/online/pages/man3/minor.3.html
  // Pack/unpack a device number as (major << 8) | minor.
  makedev: function(maj, min) {
    return ((maj) << 8 | (min));
  },
  gnu_dev_makedev: 'makedev',
  major: function(dev) {
    return ((dev) >> 8);
  },
  gnu_dev_major: 'major',
  minor: function(dev) {
    return ((dev) & 0xff);
  },
  gnu_dev_minor: 'minor',

  // ==========================================================================
  // setjmp.h
  // ==========================================================================

  // asm.js-style setjmp/longjmp support for wasm binaryen backend.
  // In asm.js compilation, various variables including setjmpId will be
  // generated within 'var asm' in emscripten.py, while in wasm compilation,
  // wasm side is considered as 'asm' so they are not generated. But
  // saveSetjmp() needs setjmpId and no other functions in wasm side needs it.
  // So we declare it here if WASM_BACKEND=1.
#if WASM_BACKEND == 1
  $setjmpId: 0,
#endif
  saveSetjmp__asm: true,
  saveSetjmp__sig: 'iii',
#if WASM_BACKEND == 1
  saveSetjmp__deps: ['realloc', '$setjmpId'],
#else
  saveSetjmp__deps: ['realloc'],
#endif
  // Record a fresh setjmp id in `env` and store (id, label) in the first free
  // slot of `table`, growing the table via realloc when full. Returns the
  // (possibly reallocated) table pointer; the table size goes out via tempRet0.
  // NOTE: asm.js-typed function — every |0 coercion is a type annotation and
  // must be preserved exactly.
  saveSetjmp: function(env, label, table, size) {
    // Not particularly fast: slow table lookup of setjmpId to label. But setjmp
    // prevents relooping anyhow, so slowness is to be expected. And typical case
    // is 1 setjmp per invocation, or less.
    env = env|0;
    label = label|0;
    table = table|0;
    size = size|0;
    var i = 0;
    setjmpId = (setjmpId+1)|0;
    {{{ makeSetValueAsm('env', '0', 'setjmpId', 'i32') }}};
    while ((i|0) < (size|0)) {
      if ({{{ makeGetValueAsm('table', '(i<<3)', 'i32') }}} == 0) {
        {{{ makeSetValueAsm('table', '(i<<3)', 'setjmpId', 'i32') }}};
        {{{ makeSetValueAsm('table', '(i<<3)+4', 'label', 'i32') }}};
        // prepare next slot
        {{{ makeSetValueAsm('table', '(i<<3)+8', '0', 'i32') }}};
        {{{ makeSetTempRet0('size') }}};
        return table | 0;
      }
      i = i+1|0;
    }
    // grow the table
    size = (size*2)|0;
    table = _realloc(table|0, 8*(size+1|0)|0) | 0;
    table = _saveSetjmp(env|0, label|0, table|0, size|0) | 0;
    {{{ makeSetTempRet0('size') }}};
    return table | 0;
  },
  testSetjmp__asm: true,
  testSetjmp__sig: 'iii',
  // Scan the (id, label) table for `id`; return its label, or 0 when the id
  // is not present (an empty slot terminates the scan early).
  testSetjmp: function(id, table, size) {
    id = id|0;
    table = table|0;
    size = size|0;
    var i = 0, curr = 0;
    while ((i|0) < (size|0)) {
      curr = {{{ makeGetValueAsm('table', '(i<<3)', 'i32') }}};
      if ((curr|0) == 0) break;
      if ((curr|0) == (id|0)) {
        return {{{ makeGetValueAsm('table', '(i<<3)+4', 'i32') }}};
      }
      i = i+1|0;
    }
    return 0;
  },
  setjmp__deps: ['saveSetjmp', 'testSetjmp'],
  setjmp__inline: function(env) {
    // Save the label
    return '_saveSetjmp(' + env + ', label, setjmpTable)|0';
  },
  longjmp__deps: ['saveSetjmp', 'testSetjmp'],
  longjmp: function(env, value) {
    // Implemented by throwing: setThrew records (env, value) for the unwinding
    // setjmp site. A zero value is promoted to 1, as C requires.
    Module['setThrew'](env, value || 1);
    throw 'longjmp';
  },
  emscripten_longjmp__deps: ['longjmp'],
  emscripten_longjmp: function(env, value) {
    _longjmp(env, value);
  },

  // ==========================================================================
  // sys/wait.h
  // ==========================================================================

  wait__deps: ['$ERRNO_CODES', '__setErrNo'],
  wait: function(stat_loc) {
    // pid_t wait(int *stat_loc);
    // http://pubs.opengroup.org/onlinepubs/009695399/functions/wait.html
    // Makes no sense in a single-process environment.
    ___setErrNo(ERRNO_CODES.ECHILD);
    return -1;
  },
  // NOTE: These aren't really the same, but we use the same stub for them all.
waitid: 'wait',
  waitpid: 'wait',
  wait3: 'wait',
  wait4: 'wait',

  // ==========================================================================
  // errno.h
  // ==========================================================================

  // Symbolic errno constants, resolved from the C headers at build time via
  // cDefine so the JS side always agrees with the compiled libc values.
  $ERRNO_CODES: {
    EPERM: {{{ cDefine('EPERM') }}}, ENOENT: {{{ cDefine('ENOENT') }}}, ESRCH: {{{ cDefine('ESRCH') }}}, EINTR: {{{ cDefine('EINTR') }}},
    EIO: {{{ cDefine('EIO') }}}, ENXIO: {{{ cDefine('ENXIO') }}}, E2BIG: {{{ cDefine('E2BIG') }}}, ENOEXEC: {{{ cDefine('ENOEXEC') }}},
    EBADF: {{{ cDefine('EBADF') }}}, ECHILD: {{{ cDefine('ECHILD') }}}, EAGAIN: {{{ cDefine('EAGAIN') }}}, EWOULDBLOCK: {{{ cDefine('EWOULDBLOCK') }}},
    ENOMEM: {{{ cDefine('ENOMEM') }}}, EACCES: {{{ cDefine('EACCES') }}}, EFAULT: {{{ cDefine('EFAULT') }}}, ENOTBLK: {{{ cDefine('ENOTBLK') }}},
    EBUSY: {{{ cDefine('EBUSY') }}}, EEXIST: {{{ cDefine('EEXIST') }}}, EXDEV: {{{ cDefine('EXDEV') }}}, ENODEV: {{{ cDefine('ENODEV') }}},
    ENOTDIR: {{{ cDefine('ENOTDIR') }}}, EISDIR: {{{ cDefine('EISDIR') }}}, EINVAL: {{{ cDefine('EINVAL') }}}, ENFILE: {{{ cDefine('ENFILE') }}},
    EMFILE: {{{ cDefine('EMFILE') }}}, ENOTTY: {{{ cDefine('ENOTTY') }}}, ETXTBSY: {{{ cDefine('ETXTBSY') }}}, EFBIG: {{{ cDefine('EFBIG') }}},
    ENOSPC: {{{ cDefine('ENOSPC') }}}, ESPIPE: {{{ cDefine('ESPIPE') }}}, EROFS: {{{ cDefine('EROFS') }}}, EMLINK: {{{ cDefine('EMLINK') }}},
    EPIPE: {{{ cDefine('EPIPE') }}}, EDOM: {{{ cDefine('EDOM') }}}, ERANGE: {{{ cDefine('ERANGE') }}}, ENOMSG: {{{ cDefine('ENOMSG') }}},
    EIDRM: {{{ cDefine('EIDRM') }}}, ECHRNG: {{{ cDefine('ECHRNG') }}}, EL2NSYNC: {{{ cDefine('EL2NSYNC') }}}, EL3HLT: {{{ cDefine('EL3HLT') }}},
    EL3RST: {{{ cDefine('EL3RST') }}}, ELNRNG: {{{ cDefine('ELNRNG') }}}, EUNATCH: {{{ cDefine('EUNATCH') }}}, ENOCSI: {{{ cDefine('ENOCSI') }}},
    EL2HLT: {{{ cDefine('EL2HLT') }}}, EDEADLK: {{{ cDefine('EDEADLK') }}}, ENOLCK: {{{ cDefine('ENOLCK') }}}, EBADE: {{{ cDefine('EBADE') }}},
    EBADR: {{{ cDefine('EBADR') }}}, EXFULL: {{{ cDefine('EXFULL') }}}, ENOANO: {{{ cDefine('ENOANO') }}}, EBADRQC: {{{ cDefine('EBADRQC') }}},
    EBADSLT: {{{ cDefine('EBADSLT') }}}, EDEADLOCK: {{{ cDefine('EDEADLOCK') }}}, EBFONT: {{{ cDefine('EBFONT') }}}, ENOSTR: {{{ cDefine('ENOSTR') }}},
    ENODATA: {{{ cDefine('ENODATA') }}}, ETIME: {{{ cDefine('ETIME') }}}, ENOSR: {{{ cDefine('ENOSR') }}}, ENONET: {{{ cDefine('ENONET') }}},
    ENOPKG: {{{ cDefine('ENOPKG') }}}, EREMOTE: {{{ cDefine('EREMOTE') }}}, ENOLINK: {{{ cDefine('ENOLINK') }}}, EADV: {{{ cDefine('EADV') }}},
    ESRMNT: {{{ cDefine('ESRMNT') }}}, ECOMM: {{{ cDefine('ECOMM') }}}, EPROTO: {{{ cDefine('EPROTO') }}}, EMULTIHOP: {{{ cDefine('EMULTIHOP') }}},
    EDOTDOT: {{{ cDefine('EDOTDOT') }}}, EBADMSG: {{{ cDefine('EBADMSG') }}}, ENOTUNIQ: {{{ cDefine('ENOTUNIQ') }}}, EBADFD: {{{ cDefine('EBADFD') }}},
    EREMCHG: {{{ cDefine('EREMCHG') }}}, ELIBACC: {{{ cDefine('ELIBACC') }}}, ELIBBAD: {{{ cDefine('ELIBBAD') }}}, ELIBSCN: {{{ cDefine('ELIBSCN') }}},
    ELIBMAX: {{{ cDefine('ELIBMAX') }}}, ELIBEXEC: {{{ cDefine('ELIBEXEC') }}}, ENOSYS: {{{ cDefine('ENOSYS') }}}, ENOTEMPTY: {{{ cDefine('ENOTEMPTY') }}},
    ENAMETOOLONG: {{{ cDefine('ENAMETOOLONG') }}}, ELOOP: {{{ cDefine('ELOOP') }}}, EOPNOTSUPP: {{{ cDefine('EOPNOTSUPP') }}}, EPFNOSUPPORT: {{{ cDefine('EPFNOSUPPORT') }}},
    ECONNRESET: {{{ cDefine('ECONNRESET') }}}, ENOBUFS: {{{ cDefine('ENOBUFS') }}}, EAFNOSUPPORT: {{{ cDefine('EAFNOSUPPORT') }}}, EPROTOTYPE: {{{ cDefine('EPROTOTYPE') }}},
    ENOTSOCK: {{{ cDefine('ENOTSOCK') }}}, ENOPROTOOPT: {{{ cDefine('ENOPROTOOPT') }}}, ESHUTDOWN: {{{ cDefine('ESHUTDOWN') }}}, ECONNREFUSED: {{{ cDefine('ECONNREFUSED') }}},
    EADDRINUSE: {{{ cDefine('EADDRINUSE') }}}, ECONNABORTED: {{{ cDefine('ECONNABORTED') }}}, ENETUNREACH: {{{ cDefine('ENETUNREACH') }}}, ENETDOWN: {{{ cDefine('ENETDOWN') }}},
    ETIMEDOUT: {{{ cDefine('ETIMEDOUT') }}}, EHOSTDOWN: {{{ cDefine('EHOSTDOWN') }}}, EHOSTUNREACH: {{{ cDefine('EHOSTUNREACH') }}}, EINPROGRESS: {{{ cDefine('EINPROGRESS') }}},
    EALREADY: {{{ cDefine('EALREADY') }}}, EDESTADDRREQ: {{{ cDefine('EDESTADDRREQ') }}}, EMSGSIZE: {{{ cDefine('EMSGSIZE') }}}, EPROTONOSUPPORT: {{{ cDefine('EPROTONOSUPPORT') }}},
    ESOCKTNOSUPPORT: {{{ cDefine('ESOCKTNOSUPPORT') }}}, EADDRNOTAVAIL: {{{ cDefine('EADDRNOTAVAIL') }}}, ENETRESET: {{{ cDefine('ENETRESET') }}}, EISCONN: {{{ cDefine('EISCONN') }}},
    ENOTCONN: {{{ cDefine('ENOTCONN') }}}, ETOOMANYREFS: {{{ cDefine('ETOOMANYREFS') }}}, EUSERS: {{{ cDefine('EUSERS') }}}, EDQUOT: {{{ cDefine('EDQUOT') }}},
    ESTALE: {{{ cDefine('ESTALE') }}}, ENOTSUP: {{{ cDefine('ENOTSUP') }}}, ENOMEDIUM: {{{ cDefine('ENOMEDIUM') }}}, EILSEQ: {{{ cDefine('EILSEQ') }}},
    EOVERFLOW: {{{ cDefine('EOVERFLOW') }}}, ECANCELED: {{{ cDefine('ECANCELED') }}}, ENOTRECOVERABLE: {{{ cDefine('ENOTRECOVERABLE') }}}, EOWNERDEAD: {{{ cDefine('EOWNERDEAD') }}},
    ESTRPIPE: {{{ cDefine('ESTRPIPE') }}},
  },
  // Human-readable strings for strerror(), keyed by the build-time errno
  // value. Messages follow the traditional (newlib-style) wording.
  $ERRNO_MESSAGES: {
    0: 'Success',
    {{{ cDefine('EPERM') }}}: 'Not super-user', {{{ cDefine('ENOENT') }}}: 'No such file or directory',
    {{{ cDefine('ESRCH') }}}: 'No such process', {{{ cDefine('EINTR') }}}: 'Interrupted system call',
    {{{ cDefine('EIO') }}}: 'I/O error', {{{ cDefine('ENXIO') }}}: 'No such device or address',
    {{{ cDefine('E2BIG') }}}: 'Arg list too long', {{{ cDefine('ENOEXEC') }}}: 'Exec format error',
    {{{ cDefine('EBADF') }}}: 'Bad file number', {{{ cDefine('ECHILD') }}}: 'No children',
    {{{ cDefine('EWOULDBLOCK') }}}: 'No more processes', {{{ cDefine('ENOMEM') }}}: 'Not enough core',
    {{{ cDefine('EACCES') }}}: 'Permission denied', {{{ cDefine('EFAULT') }}}: 'Bad address',
    {{{ cDefine('ENOTBLK') }}}: 'Block device required', {{{ cDefine('EBUSY') }}}: 'Mount device busy',
    {{{ cDefine('EEXIST') }}}: 'File exists', {{{ cDefine('EXDEV') }}}: 'Cross-device link',
    {{{ cDefine('ENODEV') }}}: 'No such device', {{{ cDefine('ENOTDIR') }}}: 'Not a directory',
    {{{ cDefine('EISDIR') }}}: 'Is a directory', {{{ cDefine('EINVAL') }}}: 'Invalid argument',
    {{{ cDefine('ENFILE') }}}: 'Too many open files in system', {{{ cDefine('EMFILE') }}}: 'Too many open files',
    {{{ cDefine('ENOTTY') }}}: 'Not a typewriter', {{{ cDefine('ETXTBSY') }}}: 'Text file busy',
    {{{ cDefine('EFBIG') }}}: 'File too large', {{{ cDefine('ENOSPC') }}}: 'No space left on device',
    {{{ cDefine('ESPIPE') }}}: 'Illegal seek', {{{ cDefine('EROFS') }}}: 'Read only file system',
    {{{ cDefine('EMLINK') }}}: 'Too many links', {{{ cDefine('EPIPE') }}}: 'Broken pipe',
    {{{ cDefine('EDOM') }}}: 'Math arg out of domain of func', {{{ cDefine('ERANGE') }}}: 'Math result not representable',
    {{{ cDefine('ENOMSG') }}}: 'No message of desired type', {{{ cDefine('EIDRM') }}}: 'Identifier removed',
    {{{ cDefine('ECHRNG') }}}: 'Channel number out of range', {{{ cDefine('EL2NSYNC') }}}: 'Level 2 not synchronized',
    {{{ cDefine('EL3HLT') }}}: 'Level 3 halted', {{{ cDefine('EL3RST') }}}: 'Level 3 reset',
    {{{ cDefine('ELNRNG') }}}: 'Link number out of range', {{{ cDefine('EUNATCH') }}}: 'Protocol driver not attached',
    {{{ cDefine('ENOCSI') }}}: 'No CSI structure available', {{{ cDefine('EL2HLT') }}}: 'Level 2 halted',
    {{{ cDefine('EDEADLK') }}}: 'Deadlock condition', {{{ cDefine('ENOLCK') }}}: 'No record locks available',
    {{{ cDefine('EBADE') }}}: 'Invalid exchange', {{{ cDefine('EBADR') }}}: 'Invalid request descriptor',
    {{{ cDefine('EXFULL') }}}: 'Exchange full', {{{ cDefine('ENOANO') }}}: 'No anode',
    {{{ cDefine('EBADRQC') }}}: 'Invalid request code', {{{ cDefine('EBADSLT') }}}: 'Invalid slot',
    {{{ cDefine('EDEADLOCK') }}}: 'File locking deadlock error', {{{ cDefine('EBFONT') }}}: 'Bad font file fmt',
    {{{ cDefine('ENOSTR') }}}: 'Device not a stream', {{{ cDefine('ENODATA') }}}: 'No data (for no delay io)',
    {{{ cDefine('ETIME') }}}: 'Timer expired', {{{ cDefine('ENOSR') }}}: 'Out of streams resources',
    {{{ cDefine('ENONET') }}}: 'Machine is not on the network', {{{ cDefine('ENOPKG') }}}: 'Package not installed',
    {{{ cDefine('EREMOTE') }}}: 'The object is remote', {{{ cDefine('ENOLINK') }}}: 'The link has been severed',
    {{{ cDefine('EADV') }}}: 'Advertise error', {{{ cDefine('ESRMNT') }}}: 'Srmount error',
    {{{ cDefine('ECOMM') }}}: 'Communication error on send', {{{ cDefine('EPROTO') }}}: 'Protocol error',
    {{{ cDefine('EMULTIHOP') }}}: 'Multihop attempted', {{{ cDefine('EDOTDOT') }}}: 'Cross mount point (not really error)',
    {{{ cDefine('EBADMSG') }}}: 'Trying to read unreadable message', {{{ cDefine('ENOTUNIQ') }}}: 'Given log. name not unique',
    {{{ cDefine('EBADFD') }}}: 'f.d. invalid for this operation', {{{ cDefine('EREMCHG') }}}: 'Remote address changed',
    {{{ cDefine('ELIBACC') }}}: 'Can access a needed shared lib', {{{ cDefine('ELIBBAD') }}}: 'Accessing a corrupted shared lib',
    {{{ cDefine('ELIBSCN') }}}: '.lib section in a.out corrupted', {{{ cDefine('ELIBMAX') }}}: 'Attempting to link in too many libs',
    {{{ cDefine('ELIBEXEC') }}}: 'Attempting to exec a shared library', {{{ cDefine('ENOSYS') }}}: 'Function not implemented',
    {{{ cDefine('ENOTEMPTY') }}}: 'Directory not empty', {{{ cDefine('ENAMETOOLONG') }}}: 'File or path name too long',
    {{{ cDefine('ELOOP') }}}: 'Too many symbolic links', {{{ cDefine('EOPNOTSUPP') }}}: 'Operation not supported on transport endpoint',
    {{{ cDefine('EPFNOSUPPORT') }}}: 'Protocol family not supported', {{{ cDefine('ECONNRESET') }}}: 'Connection reset by peer',
    {{{ cDefine('ENOBUFS') }}}: 'No buffer space available', {{{ cDefine('EAFNOSUPPORT') }}}: 'Address family not supported by protocol family',
    {{{ cDefine('EPROTOTYPE') }}}: 'Protocol wrong type for socket', {{{ cDefine('ENOTSOCK') }}}: 'Socket operation on non-socket',
    {{{ cDefine('ENOPROTOOPT') }}}: 'Protocol not available', {{{ cDefine('ESHUTDOWN') }}}: 'Can\'t send after socket shutdown',
    {{{ cDefine('ECONNREFUSED') }}}: 'Connection refused', {{{ cDefine('EADDRINUSE') }}}: 'Address already in use',
    {{{ cDefine('ECONNABORTED') }}}: 'Connection aborted', {{{ cDefine('ENETUNREACH') }}}: 'Network is unreachable',
    {{{ cDefine('ENETDOWN') }}}: 'Network interface is not configured', {{{ cDefine('ETIMEDOUT') }}}: 'Connection timed out',
    {{{ cDefine('EHOSTDOWN') }}}: 'Host is down', {{{ cDefine('EHOSTUNREACH') }}}: 'Host is unreachable',
    {{{ cDefine('EINPROGRESS') }}}: 'Connection already in progress', {{{ cDefine('EALREADY') }}}: 'Socket already connected',
    {{{ cDefine('EDESTADDRREQ') }}}: 'Destination address required', {{{ cDefine('EMSGSIZE') }}}: 'Message too long',
    {{{ cDefine('EPROTONOSUPPORT') }}}: 'Unknown protocol', {{{ cDefine('ESOCKTNOSUPPORT') }}}: 'Socket type not supported',
    {{{ cDefine('EADDRNOTAVAIL') }}}: 'Address not available', {{{ cDefine('ENETRESET') }}}: 'Connection reset by network',
    {{{ cDefine('EISCONN') }}}: 'Socket is already connected', {{{ cDefine('ENOTCONN') }}}: 'Socket is not connected',
    {{{ cDefine('ETOOMANYREFS') }}}: 'Too many references', {{{ cDefine('EUSERS') }}}: 'Too many users',
    {{{ cDefine('EDQUOT') }}}: 'Quota exceeded', {{{ cDefine('ESTALE') }}}: 'Stale file handle',
    {{{ cDefine('ENOTSUP') }}}: 'Not supported', {{{ cDefine('ENOMEDIUM') }}}: 'No medium (in tape drive)',
    {{{ cDefine('EILSEQ') }}}: 'Illegal byte sequence', {{{ cDefine('EOVERFLOW') }}}: 'Value too large for defined data type',
    {{{ cDefine('ECANCELED') }}}: 'Operation canceled', {{{ cDefine('ENOTRECOVERABLE') }}}: 'State not recoverable',
    {{{ cDefine('EOWNERDEAD') }}}: 'Previous owner died', {{{ cDefine('ESTRPIPE') }}}: 'Streams pipe error',
  },
  // Store `value` into the C-side errno location (when the runtime exports
  // one) and return it, so callers can do `return -___setErrNo(code)`.
  __setErrNo: function(value) {
    if (Module['___errno_location']) {{{ makeSetValue("Module['___errno_location']()", 0, 'value', 'i32') }}};
#if ASSERTIONS
    else Module.printErr('failed to set errno from JS');
#endif
    return value;
  },

  // ==========================================================================
  // sched.h (stubs only - no thread support yet!)
// ==========================================================================

  sched_yield: function() {
    // Single-threaded environment: yielding is a no-op that always "succeeds".
    return 0;
  },

  // ==========================================================================
  // arpa/inet.h
  // ==========================================================================

  // old ipv4 only functions
  inet_addr__deps: ['_inet_pton4_raw'],
  inet_addr: function(ptr) {
    // Returns the packed address, or -1 (INADDR_NONE) on parse failure.
    var addr = __inet_pton4_raw(Pointer_stringify(ptr));
    if (addr === null) {
      return -1;
    }
    return addr;
  },

  // ==========================================================================
  // netinet/in.h
  // ==========================================================================

#if USE_PTHREADS
  in6addr_any: '; if (ENVIRONMENT_IS_PTHREAD) _in6addr_any = PthreadWorkerInit._in6addr_any; else PthreadWorkerInit._in6addr_any = _in6addr_any = allocate([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "i8", ALLOC_STATIC)',
  in6addr_loopback: '; if (ENVIRONMENT_IS_PTHREAD) _in6addr_loopback = PthreadWorkerInit._in6addr_loopback; else PthreadWorkerInit._in6addr_loopback = _in6addr_loopback = allocate([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1], "i8", ALLOC_STATIC)',
#else
  in6addr_any: 'allocate([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "i8", ALLOC_STATIC)',
  in6addr_loopback: 'allocate([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1], "i8", ALLOC_STATIC)',
#endif

  // ==========================================================================
  // netdb.h
  // ==========================================================================

  // Parse a dotted-quad IPv4 string into a 32-bit unsigned integer in network
  // byte order, or return null when a component is not numeric.
  // NOTE(review): components are not range-checked against 0-255 and fewer
  // than four components are not rejected — confirm callers pre-validate.
  _inet_pton4_raw: function(str) {
    var b = str.split('.');
    for (var i = 0; i < 4; i++) {
      var tmp = Number(b[i]);
      if (isNaN(tmp)) return null;
      b[i] = tmp;
    }
    return (b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24)) >>> 0;
  },
  // Inverse of _inet_pton4_raw: network-byte-order integer -> dotted quad.
  _inet_ntop4_raw: function(addr) {
    return (addr & 0xff) + '.' + ((addr >> 8) & 0xff) + '.' + ((addr >> 16) & 0xff) + '.' + ((addr >> 24) & 0xff)
  },
  _inet_pton6_raw__deps: ['htons'],
  // Parse an IPv6 string (optionally with an embedded dotted-quad IPv4 tail)
  // into four 32-bit integers in network byte order, or null when invalid.
  _inet_pton6_raw: function(str) {
    var words;
    var w, offset, z, i;
    /* http://home.deds.nl/~aeron/regex/ */
    var valid6regx = /^((?=.*::)(?!.*::.+::)(::)?([\dA-F]{1,4}:(:|\b)|){5}|([\dA-F]{1,4}:){6})((([\dA-F]{1,4}((?!\3)::|:\b|$))|(?!\2\3)){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})$/i
    var parts = [];
    if (!valid6regx.test(str)) {
      return null;
    }
    if (str === "::") {
      return [0, 0, 0, 0, 0, 0, 0, 0];
    }
    // Z placeholder to keep track of zeros when splitting the string on ":"
    if (str.indexOf("::") === 0) {
      str = str.replace("::", "Z:"); // leading zeros case
    } else {
      str = str.replace("::", ":Z:");
    }

    if (str.indexOf(".") > 0) {
      // parse IPv4 embedded stress
      str = str.replace(new RegExp('[.]', 'g'), ":");
      words = str.split(":");
      // fold the four IPv4 octets into the final two 16-bit words
      words[words.length-4] = parseInt(words[words.length-4]) + parseInt(words[words.length-3])*256;
      words[words.length-3] = parseInt(words[words.length-2]) + parseInt(words[words.length-1])*256;
      words = words.slice(0, words.length-2);
    } else {
      words = str.split(":");
    }

    offset = 0; z = 0;
    for (w=0; w < words.length; w++) {
      if (typeof words[w] === 'string') {
        if (words[w] === 'Z') {
          // compressed zeros - write appropriate number of zero words
          for (z = 0; z < (8 - words.length+1); z++) {
            parts[w+z] = 0;
          }
          offset = z-1;
        } else {
          // parse hex to field to 16-bit value and write it in network byte-order
          parts[w+offset] = _htons(parseInt(words[w],16));
        }
      } else {
        // parsed IPv4 words
        parts[w+offset] = words[w];
      }
    }
    return [
      (parts[1] << 16) | parts[0],
      (parts[3] << 16) | parts[2],
      (parts[5] << 16) | parts[4],
      (parts[7] << 16) | parts[6]
    ];
  },
  _inet_pton6__deps: ['_inet_pton6_raw'],
  // C-level wrapper: parse the string at `src` and write 16 bytes to `dst`.
  // Returns 1 on success, 0 on parse failure (matching inet_pton).
  _inet_pton6: function(src, dst) {
    var ints = __inet_pton6_raw(Pointer_stringify(src));
    if (ints === null) {
      return 0;
    }
    for (var i = 0; i < 4; i++) {
      {{{ makeSetValue('dst', 'i*4', 'ints[i]', 'i32') }}};
    }
    return 1;
  },
  _inet_ntop6_raw__deps: ['_inet_ntop4_raw'],
  _inet_ntop6_raw: function(ints) {
    // ref:
// http://www.ietf.org/rfc/rfc2373.txt - section 2.5.4
    // Format for IPv4 compatible and mapped 128-bit IPv6 Addresses
    // 128-bits are split into eight 16-bit words
    // stored in network byte order (big-endian)
    // |                80 bits               | 16 |      32 bits        |
    // +-----------------------------------------------------------------+
    // |               10 bytes               |  2 |      4 bytes        |
    // +--------------------------------------+--------------------------+
    // +               5 words                |  1 |      2 words        |
    // +--------------------------------------+--------------------------+
    // |0000..............................0000|0000|    IPv4 ADDRESS     | (compatible)
    // +--------------------------------------+----+---------------------+
    // |0000..............................0000|FFFF|    IPv4 ADDRESS     | (mapped)
    // +--------------------------------------+----+---------------------+
    var str = "";
    var word = 0;
    var longest = 0;
    var lastzero = 0;
    var zstart = 0;
    var len = 0;
    var i = 0;
    var parts = [
      ints[0] & 0xffff,
      (ints[0] >> 16),
      ints[1] & 0xffff,
      (ints[1] >> 16),
      ints[2] & 0xffff,
      (ints[2] >> 16),
      ints[3] & 0xffff,
      (ints[3] >> 16)
    ];

    // Handle IPv4-compatible, IPv4-mapped, loopback and any/unspecified addresses
    var hasipv4 = true;
    var v4part = "";
    // check if the 10 high-order bytes are all zeros (first 5 words)
    for (i = 0; i < 5; i++) {
      if (parts[i] !== 0) { hasipv4 = false; break; }
    }

    if (hasipv4) {
      // low-order 32-bits store an IPv4 address (bytes 13 to 16) (last 2 words)
      v4part = __inet_ntop4_raw(parts[6] | (parts[7] << 16));
      // IPv4-mapped IPv6 address if 16-bit value (bytes 11 and 12) == 0xFFFF (6th word)
      if (parts[5] === -1) {
        str = "::ffff:";
        str += v4part;
        return str;
      }
      // IPv4-compatible IPv6 address if 16-bit value (bytes 11 and 12) == 0x0000 (6th word)
      if (parts[5] === 0) {
        str = "::";
        //special case IPv6 addresses
        if(v4part === "0.0.0.0") v4part = ""; // any/unspecified address
        if(v4part === "0.0.0.1") v4part = "1";// loopback address
        str += v4part;
        return str;
      }
    }

    // Handle all other IPv6 addresses

    // first run to find the longest contiguous zero words
    for (word = 0; word < 8; word++) {
      if (parts[word] === 0) {
        if (word - lastzero > 1) {
          len = 0;
        }
        lastzero = word;
        len++;
      }
      if (len > longest) {
        longest = len;
        zstart = word - longest + 1;
      }
    }

    // second run: emit hex words, collapsing the longest zero run into "::"
    for (word = 0; word < 8; word++) {
      if (longest > 1) {
        // compress contiguous zeros - to produce "::"
        if (parts[word] === 0 && word >= zstart && word < (zstart + longest) ) {
          if (word === zstart) {
            str += ":";
            if (zstart === 0) str += ":"; //leading zeros case
          }
          continue;
        }
      }
      // converts 16-bit words from big-endian to little-endian before converting to hex string
      str += Number(_ntohs(parts[word] & 0xffff)).toString(16);
      str += word < 7 ? ":" : "";
    }
    return str;
  },

  _read_sockaddr__deps: ['$Sockets', '_inet_ntop4_raw', '_inet_ntop6_raw'],
  // Decode a C sockaddr_in/sockaddr_in6 into { family, addr, port }, or
  // { errno } when the length or family is unsupported.
  _read_sockaddr: function (sa, salen) {
    // family / port offsets are common to both sockaddr_in and sockaddr_in6
    var family = {{{ makeGetValue('sa', C_STRUCTS.sockaddr_in.sin_family, 'i16') }}};
    var port = _ntohs({{{ makeGetValue('sa', C_STRUCTS.sockaddr_in.sin_port, 'i16') }}});
    var addr;

    switch (family) {
      case {{{ cDefine('AF_INET') }}}:
        if (salen !== {{{ C_STRUCTS.sockaddr_in.__size__ }}}) {
          return { errno: ERRNO_CODES.EINVAL };
        }
        addr = {{{ makeGetValue('sa', C_STRUCTS.sockaddr_in.sin_addr.s_addr, 'i32') }}};
        addr = __inet_ntop4_raw(addr);
        break;
      case {{{ cDefine('AF_INET6') }}}:
        if (salen !== {{{ C_STRUCTS.sockaddr_in6.__size__ }}}) {
          return { errno: ERRNO_CODES.EINVAL };
        }
        addr = [
          {{{ makeGetValue('sa', C_STRUCTS.sockaddr_in6.sin6_addr.__in6_union.__s6_addr+0, 'i32') }}},
          {{{ makeGetValue('sa', C_STRUCTS.sockaddr_in6.sin6_addr.__in6_union.__s6_addr+4, 'i32') }}},
          {{{ makeGetValue('sa', C_STRUCTS.sockaddr_in6.sin6_addr.__in6_union.__s6_addr+8, 'i32') }}},
          {{{ makeGetValue('sa', C_STRUCTS.sockaddr_in6.sin6_addr.__in6_union.__s6_addr+12, 'i32') }}}
        ];
        addr = __inet_ntop6_raw(addr);
        break;
      default:
        return { errno: ERRNO_CODES.EAFNOSUPPORT };
    }

    return { family: family, addr: addr, port: port };
  },
  _write_sockaddr__deps:
['$Sockets', '_inet_pton4_raw', '_inet_pton6_raw'], _write_sockaddr: function (sa, family, addr, port) { switch (family) { case {{{ cDefine('AF_INET') }}}: addr = __inet_pton4_raw(addr); {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in.sin_family, 'family', 'i16') }}}; {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in.sin_addr.s_addr, 'addr', 'i32') }}}; {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in.sin_port, '_htons(port)', 'i16') }}}; break; case {{{ cDefine('AF_INET6') }}}: addr = __inet_pton6_raw(addr); {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in6.sin6_family, 'family', 'i32') }}}; {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in6.sin6_addr.__in6_union.__s6_addr+0, 'addr[0]', 'i32') }}}; {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in6.sin6_addr.__in6_union.__s6_addr+4, 'addr[1]', 'i32') }}}; {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in6.sin6_addr.__in6_union.__s6_addr+8, 'addr[2]', 'i32') }}}; {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in6.sin6_addr.__in6_union.__s6_addr+12, 'addr[3]', 'i32') }}}; {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in6.sin6_port, '_htons(port)', 'i16') }}}; {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in6.sin6_flowinfo, '0', 'i32') }}}; {{{ makeSetValue('sa', C_STRUCTS.sockaddr_in6.sin6_scope_id, '0', 'i32') }}}; break; default: return { errno: ERRNO_CODES.EAFNOSUPPORT }; } // kind of lame, but let's match _read_sockaddr's interface return {}; }, // We can't actually resolve hostnames in the browser, so instead // we're generating fake IP addresses with lookup_name that we can // resolve later on with lookup_addr. // We do the aliasing in 172.29.*.*, giving us 65536 possibilities. $DNS__deps: ['_inet_pton4_raw', '_inet_pton6_raw'], $DNS: { address_map: { id: 1, addrs: {}, names: {} }, lookup_name: function (name) { // If the name is already a valid ipv4 / ipv6 address, don't generate a fake one. 
    var res = __inet_pton4_raw(name);
    if (res !== null) {
      return name;
    }
    res = __inet_pton6_raw(name);
    if (res !== null) {
      return name;
    }

    // See if this name is already mapped.
    var addr;
    if (DNS.address_map.addrs[name]) {
      addr = DNS.address_map.addrs[name];
    } else {
      var id = DNS.address_map.id++;
      assert(id < 65535, 'exceeded max address mappings of 65535');
      // NOTE(review): (id & 0xff00) is not shifted down, so the last "octet"
      // can exceed 255. It is consistent as an opaque map key, but confirm
      // __inet_pton4_raw tolerates it before relying on it as a real address.
      addr = '172.29.' + (id & 0xff) + '.' + (id & 0xff00);
      DNS.address_map.names[addr] = name;
      DNS.address_map.addrs[name] = addr;
    }
    return addr;
  },

  // Reverse-map a fake address string back to its hostname, or null.
  lookup_addr: function (addr) {
    if (DNS.address_map.names[addr]) {
      return DNS.address_map.names[addr];
    }
    return null;
  }
},

// note: lots of leaking here!
gethostbyaddr__deps: ['$DNS', 'gethostbyname', '_inet_ntop4_raw'],
gethostbyaddr__proxy: 'sync',
gethostbyaddr__sig: 'iiii',
gethostbyaddr: function (addr, addrlen, type) {
  if (type !== {{{ cDefine('AF_INET') }}}) {
    ___setErrNo(ERRNO_CODES.EAFNOSUPPORT);
    // TODO: set h_errno
    return null;
  }
  addr = {{{ makeGetValue('addr', '0', 'i32') }}}; // addr is in_addr
  var host = __inet_ntop4_raw(addr);
  var lookup = DNS.lookup_addr(host);
  if (lookup) {
    host = lookup;
  }
  var hostp = allocate(intArrayFromString(host), 'i8', ALLOC_STACK);
  return _gethostbyname(hostp);
},

gethostbyname__deps: ['$DNS', '_inet_pton4_raw'],
gethostbyname__proxy: 'sync',
gethostbyname__sig: 'ii',
// Build a heap-allocated struct hostent for 'name' using the fake DNS map.
gethostbyname: function(name) {
  name = Pointer_stringify(name);

  // generate hostent
  var ret = _malloc({{{ C_STRUCTS.hostent.__size__ }}}); // XXX possibly leaked, as are others here
  var nameBuf = _malloc(name.length+1);
  stringToUTF8(name, nameBuf, name.length+1);
  {{{ makeSetValue('ret', C_STRUCTS.hostent.h_name, 'nameBuf', 'i8*') }}};
  // Empty (NULL-terminated) alias list.
  var aliasesBuf = _malloc(4);
  {{{ makeSetValue('aliasesBuf', '0', '0', 'i8*') }}};
  {{{ makeSetValue('ret', C_STRUCTS.hostent.h_aliases, 'aliasesBuf', 'i8**') }}};
  var afinet = {{{ cDefine('AF_INET') }}};
  {{{ makeSetValue('ret', C_STRUCTS.hostent.h_addrtype, 'afinet', 'i32') }}};
  {{{ makeSetValue('ret', C_STRUCTS.hostent.h_length, '4', 'i32') }}};
  // Address list: one pointer to the in_addr stored at addrListBuf+8, then NULL.
  var addrListBuf = _malloc(12);
  {{{ makeSetValue('addrListBuf', '0', 'addrListBuf+8', 'i32*') }}};
  {{{ makeSetValue('addrListBuf', '4', '0', 'i32*') }}};
  {{{ makeSetValue('addrListBuf', '8', '__inet_pton4_raw(DNS.lookup_name(name))', 'i32') }}};
  {{{ makeSetValue('ret', C_STRUCTS.hostent.h_addr_list, 'addrListBuf', 'i8**') }}};
  return ret;
},

gethostbyname_r__deps: ['gethostbyname'],
gethostbyname_r__proxy: 'sync',
gethostbyname_r__sig: 'iiiiiii',
// Reentrant variant: copies the gethostbyname result into caller storage.
gethostbyname_r: function(name, ret, buf, buflen, out, err) {
  var data = _gethostbyname(name);
  _memcpy(ret, data, {{{ C_STRUCTS.hostent.__size__ }}});
  _free(data);
  {{{ makeSetValue('err', '0', '0', 'i32') }}};
  {{{ makeSetValue('out', '0', 'ret', '*') }}};
  return 0;
},

getaddrinfo__deps: ['$Sockets', '$DNS', '_inet_pton4_raw', '_inet_ntop4_raw', '_inet_pton6_raw', '_inet_ntop6_raw', '_write_sockaddr'],
getaddrinfo__proxy: 'sync',
getaddrinfo__sig: 'iiiii',
getaddrinfo: function(node, service, hint, out) {
  // Note getaddrinfo currently only returns a single addrinfo with ai_next defaulting to NULL. When NULL
  // hints are specified or ai_family set to AF_UNSPEC or ai_socktype or ai_protocol set to 0 then we
  // really should provide a linked list of suitable addrinfo values.
  var addrs = [];
  var canon = null;
  var addr = 0;
  var port = 0;
  var flags = 0;
  var family = {{{ cDefine('AF_UNSPEC') }}};
  var type = 0;
  var proto = 0;
  var ai, last;

  // Allocate and populate one addrinfo + sockaddr pair on the C heap.
  function allocaddrinfo(family, type, proto, canon, addr, port) {
    var sa, salen, ai;
    var res;

    salen = family === {{{ cDefine('AF_INET6') }}} ?
      {{{ C_STRUCTS.sockaddr_in6.__size__ }}} :
      {{{ C_STRUCTS.sockaddr_in.__size__ }}};
    addr = family === {{{ cDefine('AF_INET6') }}} ?
      __inet_ntop6_raw(addr) :
      __inet_ntop4_raw(addr);
    sa = _malloc(salen);
    res = __write_sockaddr(sa, family, addr, port);
    assert(!res.errno);

    ai = _malloc({{{ C_STRUCTS.addrinfo.__size__ }}});
    {{{ makeSetValue('ai', C_STRUCTS.addrinfo.ai_family, 'family', 'i32') }}};
    {{{ makeSetValue('ai', C_STRUCTS.addrinfo.ai_socktype, 'type', 'i32') }}};
    {{{ makeSetValue('ai', C_STRUCTS.addrinfo.ai_protocol, 'proto', 'i32') }}};
    {{{ makeSetValue('ai', C_STRUCTS.addrinfo.ai_canonname, 'canon', 'i32') }}};
    {{{ makeSetValue('ai', C_STRUCTS.addrinfo.ai_addr, 'sa', '*') }}};
    if (family === {{{ cDefine('AF_INET6') }}}) {
      {{{ makeSetValue('ai', C_STRUCTS.addrinfo.ai_addrlen, C_STRUCTS.sockaddr_in6.__size__, 'i32') }}};
    } else {
      {{{ makeSetValue('ai', C_STRUCTS.addrinfo.ai_addrlen, C_STRUCTS.sockaddr_in.__size__, 'i32') }}};
    }
    {{{ makeSetValue('ai', C_STRUCTS.addrinfo.ai_next, '0', 'i32') }}};

    return ai;
  }

  if (hint) {
    flags = {{{ makeGetValue('hint', C_STRUCTS.addrinfo.ai_flags, 'i32') }}};
    family = {{{ makeGetValue('hint', C_STRUCTS.addrinfo.ai_family, 'i32') }}};
    type = {{{ makeGetValue('hint', C_STRUCTS.addrinfo.ai_socktype, 'i32') }}};
    proto = {{{ makeGetValue('hint', C_STRUCTS.addrinfo.ai_protocol, 'i32') }}};
  }
  // Infer the protocol from the socket type (and vice versa) when only one was given.
  if (type && !proto) {
    proto = type === {{{ cDefine('SOCK_DGRAM') }}} ? {{{ cDefine('IPPROTO_UDP') }}} : {{{ cDefine('IPPROTO_TCP') }}};
  }
  if (!type && proto) {
    type = proto === {{{ cDefine('IPPROTO_UDP') }}} ? {{{ cDefine('SOCK_DGRAM') }}} : {{{ cDefine('SOCK_STREAM') }}};
  }

  // If type or proto are set to zero in hints we should really be returning multiple addrinfo values, but for
  // now default to a TCP STREAM socket so we can at least return a sensible addrinfo given NULL hints.
if (proto === 0) {
  proto = {{{ cDefine('IPPROTO_TCP') }}};
}
if (type === 0) {
  type = {{{ cDefine('SOCK_STREAM') }}};
}

// Validate inputs per POSIX before attempting any lookup.
if (!node && !service) {
  return {{{ cDefine('EAI_NONAME') }}};
}
if (flags & ~({{{ cDefine('AI_PASSIVE') }}}|{{{ cDefine('AI_CANONNAME') }}}|{{{ cDefine('AI_NUMERICHOST') }}}|
    {{{ cDefine('AI_NUMERICSERV') }}}|{{{ cDefine('AI_V4MAPPED') }}}|{{{ cDefine('AI_ALL') }}}|{{{ cDefine('AI_ADDRCONFIG') }}})) {
  return {{{ cDefine('EAI_BADFLAGS') }}};
}
if (hint !== 0 && ({{{ makeGetValue('hint', C_STRUCTS.addrinfo.ai_flags, 'i32') }}} & {{{ cDefine('AI_CANONNAME') }}}) && !node) {
  return {{{ cDefine('EAI_BADFLAGS') }}};
}
if (flags & {{{ cDefine('AI_ADDRCONFIG') }}}) {
  // TODO
  return {{{ cDefine('EAI_NONAME') }}};
}
if (type !== 0 && type !== {{{ cDefine('SOCK_STREAM') }}} && type !== {{{ cDefine('SOCK_DGRAM') }}}) {
  return {{{ cDefine('EAI_SOCKTYPE') }}};
}
if (family !== {{{ cDefine('AF_UNSPEC') }}} && family !== {{{ cDefine('AF_INET') }}} && family !== {{{ cDefine('AF_INET6') }}}) {
  return {{{ cDefine('EAI_FAMILY') }}};
}

// Only numeric service strings (port numbers) are supported.
if (service) {
  service = Pointer_stringify(service);
  port = parseInt(service, 10);

  if (isNaN(port)) {
    if (flags & {{{ cDefine('AI_NUMERICSERV') }}}) {
      return {{{ cDefine('EAI_NONAME') }}};
    }
    // TODO support resolving well-known service names from:
    // http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
    return {{{ cDefine('EAI_SERVICE') }}};
  }
}

// NULL node: return the loopback (or unspecified, for AI_PASSIVE) address.
if (!node) {
  if (family === {{{ cDefine('AF_UNSPEC') }}}) {
    family = {{{ cDefine('AF_INET') }}};
  }
  if ((flags & {{{ cDefine('AI_PASSIVE') }}}) === 0) {
    if (family === {{{ cDefine('AF_INET') }}}) {
      addr = _htonl({{{ cDefine('INADDR_LOOPBACK') }}});
    } else {
      addr = [0, 0, 0, 1];
    }
  }
  ai = allocaddrinfo(family, type, proto, null, addr, port);
  {{{ makeSetValue('out', '0', 'ai', '*') }}};
  return 0;
}

//
// try as a numeric address
//
node = Pointer_stringify(node);
addr = __inet_pton4_raw(node);
if (addr !== null) {
  // incoming node is a valid ipv4 address
  if
  (family === {{{ cDefine('AF_UNSPEC') }}} || family === {{{ cDefine('AF_INET') }}}) {
    family = {{{ cDefine('AF_INET') }}};
  } else if (family === {{{ cDefine('AF_INET6') }}} && (flags & {{{ cDefine('AI_V4MAPPED') }}})) {
    // Represent the IPv4 address as a v4-mapped IPv6 address (::ffff:a.b.c.d).
    addr = [0, 0, _htonl(0xffff), addr];
    family = {{{ cDefine('AF_INET6') }}};
  } else {
    return {{{ cDefine('EAI_NONAME') }}};
  }
} else {
  addr = __inet_pton6_raw(node);
  if (addr !== null) {
    // incoming node is a valid ipv6 address
    if (family === {{{ cDefine('AF_UNSPEC') }}} || family === {{{ cDefine('AF_INET6') }}}) {
      family = {{{ cDefine('AF_INET6') }}};
    } else {
      return {{{ cDefine('EAI_NONAME') }}};
    }
  }
}
if (addr != null) {
  ai = allocaddrinfo(family, type, proto, node, addr, port);
  {{{ makeSetValue('out', '0', 'ai', '*') }}};
  return 0;
}
if (flags & {{{ cDefine('AI_NUMERICHOST') }}}) {
  return {{{ cDefine('EAI_NONAME') }}};
}

//
// try as a hostname
//
// resolve the hostname to a temporary fake address
node = DNS.lookup_name(node);

addr = __inet_pton4_raw(node);
if (family === {{{ cDefine('AF_UNSPEC') }}}) {
  family = {{{ cDefine('AF_INET') }}};
} else if (family === {{{ cDefine('AF_INET6') }}}) {
  addr = [0, 0, _htonl(0xffff), addr];
}
ai = allocaddrinfo(family, type, proto, null, addr, port);
{{{ makeSetValue('out', '0', 'ai', '*') }}};
return 0;
},

getnameinfo__deps: ['$Sockets', '$DNS', '_read_sockaddr'],
// Translate a sockaddr into host (node) and service strings.
getnameinfo: function (sa, salen, node, nodelen, serv, servlen, flags) {
  var info = __read_sockaddr(sa, salen);
  if (info.errno) {
    return {{{ cDefine('EAI_FAMILY') }}};
  }
  var port = info.port;
  var addr = info.addr;

  var overflowed = false;

  if (node && nodelen) {
    var lookup;
    if ((flags & {{{ cDefine('NI_NUMERICHOST') }}}) || !(lookup = DNS.lookup_addr(addr))) {
      if (flags & {{{ cDefine('NI_NAMEREQD') }}}) {
        return {{{ cDefine('EAI_NONAME') }}};
      }
    } else {
      addr = lookup;
    }
    var numBytesWrittenExclNull = stringToUTF8(addr, node, nodelen);

    if (numBytesWrittenExclNull+1 >= nodelen) {
      overflowed = true;
    }
  }

  if (serv && servlen) {
    port = '' + port;
    var
      numBytesWrittenExclNull = stringToUTF8(port, serv, servlen);
    if (numBytesWrittenExclNull+1 >= servlen) {
      overflowed = true;
    }
  }

  if (overflowed) {
    // Note: even when we overflow, getnameinfo() is specced to write out the truncated results.
    return {{{ cDefine('EAI_OVERFLOW') }}};
  }

  return 0;
},

// Can't use a literal for $GAI_ERRNO_MESSAGES as was done for $ERRNO_MESSAGES as the keys (e.g. EAI_BADFLAGS)
// are actually negative numbers and you can't have expressions as keys in JavaScript literals.
$GAI_ERRNO_MESSAGES: {},

gai_strerror__deps: ['$GAI_ERRNO_MESSAGES'],
// Return a C string describing a getaddrinfo error code. The string lives in
// a single lazily-allocated static buffer, overwritten on each call.
gai_strerror: function(val) {
  var buflen = 256;

  // On first call to gai_strerror we initialise the buffer and populate the error messages.
  if (!_gai_strerror.buffer) {
    _gai_strerror.buffer = _malloc(buflen);

    GAI_ERRNO_MESSAGES['0'] = 'Success';
    GAI_ERRNO_MESSAGES['' + {{{ cDefine('EAI_BADFLAGS') }}}] = 'Invalid value for \'ai_flags\' field';
    GAI_ERRNO_MESSAGES['' + {{{ cDefine('EAI_NONAME') }}}] = 'NAME or SERVICE is unknown';
    GAI_ERRNO_MESSAGES['' + {{{ cDefine('EAI_AGAIN') }}}] = 'Temporary failure in name resolution';
    GAI_ERRNO_MESSAGES['' + {{{ cDefine('EAI_FAIL') }}}] = 'Non-recoverable failure in name res';
    GAI_ERRNO_MESSAGES['' + {{{ cDefine('EAI_FAMILY') }}}] = '\'ai_family\' not supported';
    GAI_ERRNO_MESSAGES['' + {{{ cDefine('EAI_SOCKTYPE') }}}] = '\'ai_socktype\' not supported';
    GAI_ERRNO_MESSAGES['' + {{{ cDefine('EAI_SERVICE') }}}] = 'SERVICE not supported for \'ai_socktype\'';
    GAI_ERRNO_MESSAGES['' + {{{ cDefine('EAI_MEMORY') }}}] = 'Memory allocation failure';
    GAI_ERRNO_MESSAGES['' + {{{ cDefine('EAI_SYSTEM') }}}] = 'System error returned in \'errno\'';
    GAI_ERRNO_MESSAGES['' + {{{ cDefine('EAI_OVERFLOW') }}}] = 'Argument buffer overflow';
  }

  var msg = 'Unknown error';

  if (val in GAI_ERRNO_MESSAGES) {
    if (GAI_ERRNO_MESSAGES[val].length > buflen - 1) {
      msg = 'Message too long'; // EMSGSIZE message. This should never occur given the GAI_ERRNO_MESSAGES above.
    } else {
      msg = GAI_ERRNO_MESSAGES[val];
    }
  }

  writeAsciiToMemory(msg, _gai_strerror.buffer);
  return _gai_strerror.buffer;
},

// Implement netdb.h protocol entry (getprotoent, getprotobyname, getprotobynumber, setprotoent, endprotoent)
// http://pubs.opengroup.org/onlinepubs/9699919799/functions/getprotobyname.html
// The Protocols object holds our 'fake' protocols 'database'.
$Protocols: {
  list: [],  // protoent pointers in insertion order, for getprotoent iteration
  map: {}    // name and number (as strings) -> protoent pointer
},
setprotoent__deps: ['$Protocols'],
setprotoent: function(stayopen) {
  // void setprotoent(int stayopen);

  // Allocate and populate a protoent structure given a name, protocol number and array of aliases
  function allocprotoent(name, proto, aliases) {
    // write name into buffer
    var nameBuf = _malloc(name.length + 1);
    writeAsciiToMemory(name, nameBuf);

    // write aliases into buffer
    var j = 0;
    var length = aliases.length;
    var aliasListBuf = _malloc((length + 1) * 4); // Use length + 1 so we have space for the terminating NULL ptr.

    for (var i = 0; i < length; i++, j += 4) {
      var alias = aliases[i];
      var aliasBuf = _malloc(alias.length + 1);
      writeAsciiToMemory(alias, aliasBuf);
      {{{ makeSetValue('aliasListBuf', 'j', 'aliasBuf', 'i8*') }}};
    }
    {{{ makeSetValue('aliasListBuf', 'j', '0', 'i8*') }}}; // Terminating NULL pointer.

    // generate protoent
    var pe = _malloc({{{ C_STRUCTS.protoent.__size__ }}});
    {{{ makeSetValue('pe', C_STRUCTS.protoent.p_name, 'nameBuf', 'i8*') }}};
    {{{ makeSetValue('pe', C_STRUCTS.protoent.p_aliases, 'aliasListBuf', 'i8**') }}};
    {{{ makeSetValue('pe', C_STRUCTS.protoent.p_proto, 'proto', 'i32') }}};
    return pe;
  };

  // Populate the protocol 'database'. The entries are limited to tcp and udp, though it is fairly trivial
  // to add extra entries from /etc/protocols if desired - though not sure if that'd actually be useful.
  var list = Protocols.list;
  var map = Protocols.map;
  if (list.length === 0) {
    var entry = allocprotoent('tcp', 6, ['TCP']);
    list.push(entry);
    map['tcp'] = map['6'] = entry;
    entry = allocprotoent('udp', 17, ['UDP']);
    list.push(entry);
    map['udp'] = map['17'] = entry;
  }

  _setprotoent.index = 0;
},

endprotoent: function() {
  // void endprotoent(void);
  // We're not using a real protocol database so we don't do a real close.
},

getprotoent__deps: ['setprotoent', '$Protocols'],
getprotoent: function(number) {
  // struct protoent *getprotoent(void);
  // reads the next entry from the protocols 'database' or return NULL if 'eof'
  if (_setprotoent.index === Protocols.list.length) {
    return 0;
  } else {
    var result = Protocols.list[_setprotoent.index++];
    return result;
  }
},

getprotobyname__deps: ['setprotoent', '$Protocols'],
getprotobyname: function(name) {
  // struct protoent *getprotobyname(const char *);
  name = Pointer_stringify(name);
  _setprotoent(true);
  var result = Protocols.map[name];
  return result;
},

getprotobynumber__deps: ['setprotoent', '$Protocols'],
getprotobynumber: function(number) {
  // struct protoent *getprotobynumber(int proto);
  _setprotoent(true);
  var result = Protocols.map[number];
  return result;
},

// ==========================================================================
// sockets. Note that the implementation assumes all sockets are always
// nonblocking
// ==========================================================================
#if SOCKET_WEBRTC
$Sockets__deps: ['__setErrNo', '$ERRNO_CODES',
  function() { return 'var SocketIO = ' + read('socket.io.js') + ';\n' },
  function() { return 'var Peer = ' + read('wrtcp.js') + ';\n' }],
#else
$Sockets__deps: ['__setErrNo', '$ERRNO_CODES'],
#endif
$Sockets: {
  BUFFER_SIZE: 10*1024, // initial size
  MAX_BUFFER_SIZE: 10*1024*1024, // maximum size we will grow the buffer

  nextFd: 1,
  fds: {},
  nextport: 1,
  maxport: 65535,
  peer: null,
  connections: {},
  portmap: {},
  localAddr: 0xfe00000a, // Local address is always 10.0.0.254
  addrPool: [0x0200000a, 0x0300000a, 0x0400000a, 0x0500000a,
             0x0600000a, 0x0700000a, 0x0800000a, 0x0900000a, 0x0a00000a,
             0x0b00000a, 0x0c00000a, 0x0d00000a, 0x0e00000a] /* 0x0100000a is reserved */
},

// pwd.h
// Password-database entry points are unimplemented stubs.
getpwnam: function() { throw 'getpwnam: TODO' },
setpwent: function() { throw 'setpwent: TODO' },
getpwent: function() { throw 'getpwent: TODO' },
endpwent: function() { throw 'endpwent: TODO' },

// ==========================================================================
// emscripten.h
// ==========================================================================

emscripten_run_script: function(ptr) {
  {{{ makeEval('eval(Pointer_stringify(ptr));') }}}
},

emscripten_run_script_int: function(ptr) {
  {{{ makeEval('return eval(Pointer_stringify(ptr))|0;') }}}
},

emscripten_run_script_string: function(ptr) {
  {{{ makeEval("var s = eval(Pointer_stringify(ptr)) + '';") }}}
  // Cache a heap buffer on the function object, growing it as needed.
  var me = _emscripten_run_script_string;
  var len = lengthBytesUTF8(s);
  if (!me.bufferSize || me.bufferSize < len+1) {
    if (me.bufferSize) _free(me.buffer);
    me.bufferSize = len+1;
    me.buffer = _malloc(me.bufferSize);
  }
  stringToUTF8(s, me.buffer, me.bufferSize);
  return me.buffer;
},

emscripten_random: function() {
  return Math.random();
},

emscripten_get_now: function() { abort() }, // replaced by the postset at startup time
// Pick the best available high-resolution clock at startup:
// node process.hrtime, dateNow, performance.now, or Date.now as fallback.
emscripten_get_now__postset:
  "if (ENVIRONMENT_IS_NODE) {\n" +
  " _emscripten_get_now = function _emscripten_get_now_actual() {\n" +
  " var t = process['hrtime']();\n" +
  " return t[0] * 1e3 + t[1] / 1e6;\n" +
  " };\n" +
  "} else if (typeof dateNow !== 'undefined') {\n" +
  " _emscripten_get_now = dateNow;\n" +
  "} else if (typeof self === 'object' && self['performance'] && typeof self['performance']['now'] === 'function') {\n" +
  " _emscripten_get_now = function() { return self['performance']['now'](); };\n" +
  "} else if (typeof performance === 'object' && typeof performance['now'] === 'function') {\n" +
  " _emscripten_get_now = function() { return performance['now'](); };\n" +
  "} else {\n" +
  " _emscripten_get_now = Date.now;\n" +
  "}",

emscripten_get_now_res: function() { // return resolution of get_now, in nanoseconds
  if (ENVIRONMENT_IS_NODE) {
    return 1; // nanoseconds
  } else if (typeof dateNow !== 'undefined' ||
      ((ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) && self['performance'] && self['performance']['now'])) {
    return 1000; // microseconds (1/1000 of a millisecond)
  } else {
    return 1000*1000; // milliseconds
  }
},

emscripten_get_now_is_monotonic__deps: ['emscripten_get_now'],
emscripten_get_now_is_monotonic: function() {
  // return whether emscripten_get_now is guaranteed monotonic; the Date.now
  // implementation is not :(
  return ENVIRONMENT_IS_NODE || (typeof dateNow !== 'undefined') ||
      ((ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) && self['performance'] && self['performance']['now']);
},

// Returns [parentFuncArguments, functionName, paramListName]
_emscripten_traverse_stack: function(args) {
  if (!args || !args.callee || !args.callee.name) {
    return [null, '', ''];
  }

  var funstr = args.callee.toString();
  var funcname = args.callee.name;
  // Render the call's arguments as a "(a, b, ...)" string; non-primitive
  // arguments are shown only by their typeof.
  var str = '(';
  var first = true;
  for (var i in args) {
    var a = args[i];
    if (!first) {
      str += ", ";
    }
    first = false;
    if (typeof a === 'number' || typeof a === 'string') {
      str += a;
    } else {
      str += '(' + typeof a + ')';
    }
  }
  str += ')';
  var
    caller = args.callee.caller;
  args = caller ? caller.arguments : [];
  if (first)
    str = '';
  return [args, funcname, str];
},

emscripten_get_callstack_js__deps: ['_emscripten_traverse_stack'],
// Build a formatted callstack string according to the EM_LOG_* flags.
emscripten_get_callstack_js: function(flags) {
  var callstack = jsStackTrace();

  // Find the symbols in the callstack that corresponds to the functions that report callstack information, and remove everything up to these from the output.
  var iThisFunc = callstack.lastIndexOf('_emscripten_log');
  var iThisFunc2 = callstack.lastIndexOf('_emscripten_get_callstack');
  var iNextLine = callstack.indexOf('\n', Math.max(iThisFunc, iThisFunc2))+1;
  callstack = callstack.slice(iNextLine);

  // If user requested to see the original source stack, but no source map information is available, just fall back to showing the JS stack.
  if (flags & 8/*EM_LOG_C_STACK*/ && typeof emscripten_source_map === 'undefined') {
    warnOnce('Source map information is not available, emscripten_log with EM_LOG_C_STACK will be ignored. Build with "--pre-js $EMSCRIPTEN/src/emscripten-source-map.min.js" linker flag to add source map loading to code.');
    flags ^= 8/*EM_LOG_C_STACK*/;
    flags |= 16/*EM_LOG_JS_STACK*/;
  }

  var stack_args = null;
  if (flags & 128 /*EM_LOG_FUNC_PARAMS*/) {
    // To get the actual parameters to the functions, traverse the stack via the unfortunately deprecated 'arguments.callee' method, if it works:
    stack_args = __emscripten_traverse_stack(arguments);
    while (stack_args[1].indexOf('_emscripten_') >= 0)
      stack_args = __emscripten_traverse_stack(stack_args[0]);
  }

  // Process all lines:
  var lines = callstack.split('\n');
  callstack = '';
  var newFirefoxRe = new RegExp('\\s*(.*?)@(.*?):([0-9]+):([0-9]+)'); // New FF30 with column info: extract components of form ' Object._main@http://server.com:4324:12'
  var firefoxRe = new RegExp('\\s*(.*?)@(.*):(.*)(:(.*))?'); // Old FF without column info: extract components of form ' Object._main@http://server.com:4324'
  var chromeRe = new RegExp('\\s*at (.*?) \\\((.*):(.*):(.*)\\\)'); // Extract components of form ' at Object._main (http://server.com/file.html:4324:12)'

  for (var l in lines) {
    var line = lines[l];

    var jsSymbolName = '';
    var file = '';
    var lineno = 0;
    var column = 0;

    var parts = chromeRe.exec(line);
    if (parts && parts.length == 5) {
      jsSymbolName = parts[1];
      file = parts[2];
      lineno = parts[3];
      column = parts[4];
    } else {
      parts = newFirefoxRe.exec(line);
      if (!parts) parts = firefoxRe.exec(line);
      if (parts && parts.length >= 4) {
        jsSymbolName = parts[1];
        file = parts[2];
        lineno = parts[3];
        column = parts[4]|0; // Old Firefox doesn't carry column information, but in new FF30, it is present. See https://bugzilla.mozilla.org/show_bug.cgi?id=762556
      } else {
        // Was not able to extract this line for demangling/sourcemapping purposes. Output it as-is.
        callstack += line + '\n';
        continue;
      }
    }

    // Try to demangle the symbol, but fall back to showing the original JS symbol name if not available.
    var cSymbolName = (flags & 32/*EM_LOG_DEMANGLE*/) ? demangle(jsSymbolName) : jsSymbolName;
    if (!cSymbolName) {
      cSymbolName = jsSymbolName;
    }

    var haveSourceMap = false;

    if (flags & 8/*EM_LOG_C_STACK*/) {
      var orig = emscripten_source_map.originalPositionFor({line: lineno, column: column});
      haveSourceMap = (orig && orig.source);
      if (haveSourceMap) {
        if (flags & 64/*EM_LOG_NO_PATHS*/) {
          orig.source = orig.source.substring(orig.source.replace(/\\/g, "/").lastIndexOf('/')+1);
        }
        callstack += ' at ' + cSymbolName + ' (' + orig.source + ':' + orig.line + ':' + orig.column + ')\n';
      }
    }
    if ((flags & 16/*EM_LOG_JS_STACK*/) || !haveSourceMap) {
      if (flags & 64/*EM_LOG_NO_PATHS*/) {
        file = file.substring(file.replace(/\\/g, "/").lastIndexOf('/')+1);
      }
      callstack += (haveSourceMap ? (' = '+jsSymbolName) : (' at '+cSymbolName)) + ' (' + file + ':' + lineno + ':' + column + ')\n';
    }

    // If we are still keeping track with the callstack by traversing via 'arguments.callee', print the function parameters as well.
    if (flags & 128 /*EM_LOG_FUNC_PARAMS*/ && stack_args[0]) {
      if (stack_args[1] == jsSymbolName && stack_args[2].length > 0) {
        callstack = callstack.replace(/\s+$/, '');
        callstack += ' with values: ' + stack_args[1] + stack_args[2] + '\n';
      }
      stack_args = __emscripten_traverse_stack(stack_args[0]);
    }
  }
  // Trim extra whitespace at the end of the output.
  callstack = callstack.replace(/\s+$/, '');
  return callstack;
},

emscripten_get_callstack__deps: ['emscripten_get_callstack_js'],
// C-callable wrapper: write the callstack into 'str' (at most 'maxbytes'),
// or return the required buffer size when str is null / maxbytes <= 0.
emscripten_get_callstack: function(flags, str, maxbytes) {
  var callstack = _emscripten_get_callstack_js(flags);
  // User can query the required amount of bytes to hold the callstack.
  if (!str || maxbytes <= 0) {
    return lengthBytesUTF8(callstack)+1;
  }
  // Output callstack string as C string to HEAP.
  var bytesWrittenExcludingNull = stringToUTF8(callstack, str, maxbytes);

  // Return number of bytes written, including null.
  return bytesWrittenExcludingNull+1;
},

emscripten_log_js__deps: ['emscripten_get_callstack_js'],
// Route a log message (optionally with appended callstack) to the console
// or to Module.print / Module.printErr according to the EM_LOG_* flags.
emscripten_log_js: function(flags, str) {
  if (flags & 24/*EM_LOG_C_STACK | EM_LOG_JS_STACK*/) {
    str = str.replace(/\s+$/, ''); // Ensure the message and the callstack are joined cleanly with exactly one newline.
    str += (str.length > 0 ? '\n' : '') + _emscripten_get_callstack_js(flags);
  }

  if (flags & 1 /*EM_LOG_CONSOLE*/) {
    if (flags & 4 /*EM_LOG_ERROR*/) {
      console.error(str);
    } else if (flags & 2 /*EM_LOG_WARN*/) {
      console.warn(str);
    } else {
      console.log(str);
    }
  } else if (flags & 6 /*EM_LOG_ERROR|EM_LOG_WARN*/) {
    Module.printErr(str);
  } else {
    Module.print(str);
  }
},

emscripten_log__deps: ['_formatString', 'emscripten_log_js'],
emscripten_log: function(flags, varargs) {
  // Extract the (optionally-existing) printf format specifier field from varargs.
  var format = {{{ makeGetValue('varargs', '0', 'i32', undefined, undefined, true) }}};
  varargs += {{{ Math.max(Runtime.getNativeFieldSize('i32'), Runtime.getAlignSize('i32', null, true)) }}};
  var str = '';
  if (format) {
    var result = __formatString(format, varargs);
    for (var i = 0; i < result.length; ++i) {
      str += String.fromCharCode(result[i]);
    }
  }
  _emscripten_log_js(flags, str);
},

emscripten_get_compiler_setting: function(name) {
  name = Pointer_stringify(name);

  var ret = getCompilerSetting(name);
  if (typeof ret === 'number') return ret;

  // String settings are allocated once and cached so repeated queries
  // return the same pointer.
  if (!_emscripten_get_compiler_setting.cache) _emscripten_get_compiler_setting.cache = {};
  var cache = _emscripten_get_compiler_setting.cache;
  var fullname = name + '__str';
  var fullret = cache[fullname];
  if (fullret) return fullret;
  return cache[fullname] = allocate(intArrayFromString(ret + ''), 'i8', ALLOC_NORMAL);
},

emscripten_debugger: function() {
  debugger;
},

emscripten_print_double: function(x, to, max) {
  var str = x + '';
  if (to) return stringToUTF8(str, to, max);
  else return lengthBytesUTF8(str);
},

//============================
// i64 math
//============================

i64Add__asm: true,
i64Add__sig: 'iiiii',
i64Add: function(a, b, c, d) {
  /*
    x = a + b*2^32
    y = c + d*2^32
    result = l + h*2^32
  */
  a = a|0; b = b|0; c = c|0; d = d|0;
  var l = 0, h = 0;
  l = (a + c)>>>0;
  h = (b + d + (((l>>>0) < (a>>>0))|0))>>>0; // Add carry from low word to high word on overflow.
  {{{ makeStructuralReturn(['l|0', 'h'], true) }}};
},
i64Subtract__asm: true,
i64Subtract__sig: 'iiiii',
i64Subtract: function(a, b, c, d) {
  a = a|0; b = b|0; c = c|0; d = d|0;
  var l = 0, h = 0;
  l = (a - c)>>>0;
  h = (b - d)>>>0; // NOTE(review): dead store — immediately overwritten below.
  h = (b - d - (((c>>>0) > (a>>>0))|0))>>>0; // Borrow one from high word to low word on underflow.
  {{{ makeStructuralReturn(['l|0', 'h'], true) }}};
},
bitshift64Shl__asm: true,
bitshift64Shl__sig: 'iiii',
// 64-bit shift left: low word returned, high word in tempRet0.
bitshift64Shl: function(low, high, bits) {
  low = low|0; high = high|0; bits = bits|0;
  var ander = 0;
  if ((bits|0) < 32) {
    ander = ((1 << bits) - 1)|0;
    {{{ makeSetTempRet0('(high << bits) | ((low&(ander << (32 - bits))) >>> (32 - bits))') }}};
    return low << bits;
  }
  {{{ makeSetTempRet0('low << (bits - 32)') }}};
  return 0;
},
bitshift64Ashr__asm: true,
bitshift64Ashr__sig: 'iiii',
// 64-bit arithmetic shift right (sign-extending).
bitshift64Ashr: function(low, high, bits) {
  low = low|0; high = high|0; bits = bits|0;
  var ander = 0;
  if ((bits|0) < 32) {
    ander = ((1 << bits) - 1)|0;
    {{{ makeSetTempRet0('high >> bits') }}};
    return (low >>> bits) | ((high&ander) << (32 - bits));
  }
  {{{ makeSetTempRet0('(high|0) < 0 ? -1 : 0') }}};
  return (high >> (bits - 32))|0;
},
bitshift64Lshr__asm: true,
bitshift64Lshr__sig: 'iiii',
// 64-bit logical shift right (zero-filling).
bitshift64Lshr: function(low, high, bits) {
  low = low|0; high = high|0; bits = bits|0;
  var ander = 0;
  if ((bits|0) < 32) {
    ander = ((1 << bits) - 1)|0;
    {{{ makeSetTempRet0('high >>> bits') }}};
    return (low >>> bits) | ((high&ander) << (32 - bits));
  }
  {{{ makeSetTempRet0('0') }}};
  return (high >>> (bits - 32))|0;
},

// misc shims for musl
__lock: function() {},
__unlock: function() {},
__lockfile: function() { return 1 },
__unlockfile: function(){},

// ubsan (undefined behavior sanitizer) support
__ubsan_handle_float_cast_overflow: function(id, post) {
  abort('Undefined behavior! ubsan_handle_float_cast_overflow: ' + [id, post]);
},

// USE_FULL_LIBRARY hacks
realloc: function() { throw 'bad' },

// libunwind

_Unwind_Backtrace__deps: ['emscripten_get_callstack_js'],
_Unwind_Backtrace: function(func, arg) {
  var trace = _emscripten_get_callstack_js();
  var parts = trace.split('\n');
  // NOTE(review): 'func' is invoked with a constant 0 context for every
  // frame; the parsed frames themselves are not passed through.
  for (var i = 0; i < parts.length; i++) {
    var ret = Module['dynCall_iii'](func, 0, arg);
    if (ret !== 0) return;
  }
},

_Unwind_GetIPInfo: function() {
  abort('Unwind_GetIPInfo');
},

_Unwind_FindEnclosingFunction: function() {
  return 0; // we cannot succeed
},

_Unwind_RaiseException__deps: ['__cxa_throw'],
_Unwind_RaiseException: function(ex) {
  Module.printErr('Warning: _Unwind_RaiseException is not correctly implemented');
  return ___cxa_throw(ex, 0, 0);
},

_Unwind_DeleteException: function(ex) {
  Module.printErr('TODO: Unwind_DeleteException');
},

// autodebugging

emscripten_autodebug_i64: function(line, valuel, valueh) {
  Module.print('AD:' + [line, valuel, valueh]);
},
emscripten_autodebug_i32: function(line, value) {
  Module.print('AD:' + [line, value]);
},
emscripten_autodebug_i16: function(line, value) {
  Module.print('AD:' + [line, value]);
},
emscripten_autodebug_i8: function(line, value) {
  Module.print('AD:' + [line, value]);
},
emscripten_autodebug_float: function(line, value) {
  Module.print('AD:' + [line, value]);
},
emscripten_autodebug_double: function(line, value) {
  Module.print('AD:' + [line, value]);
},

// misc definitions to avoid unnecessary unresolved symbols from fastcomp
emscripten_prep_setjmp: true,
emscripten_cleanup_setjmp: true,
emscripten_check_longjmp: true,
emscripten_get_longjmp_result: true,
emscripten_setjmp: true,
emscripten_preinvoke: true,
emscripten_postinvoke: true,
emscripten_resume: true,
emscripten_landingpad: true,
getHigh32: true,
setHigh32: true,
FtoILow: true,
FtoIHigh: true,
DtoILow: true,
DtoIHigh: true,
BDtoILow: true,
BDtoIHigh: true,
SItoF: true,
UItoF: true,
SItoD: true,
UItoD: true,
BItoD: true,
llvm_dbg_value: true,
llvm_debugtrap: true,
llvm_ctlz_i32: true,
emscripten_asm_const: true,
emscripten_asm_const_int: true,
emscripten_asm_const_double: true,
emscripten_asm_const_int_sync_on_main_thread: true,
emscripten_asm_const_double_sync_on_main_thread: true,
emscripten_asm_const_async_on_main_thread: true,

// ======== compiled code from system/lib/compiler-rt , see readme therein
__muldsi3__asm: true,
__muldsi3__sig: 'iii',
// 32x32 -> 64 bit multiply; low 32 bits returned, high 32 bits in tempRet0.
__muldsi3: function($a, $b) {
  $a = $a | 0;
  $b = $b | 0;
  var $1 = 0, $2 = 0, $3 = 0, $6 = 0, $8 = 0, $11 = 0, $12 = 0;
  $1 = $a & 65535;
  $2 = $b & 65535;
  $3 = Math_imul($2, $1) | 0;
  $6 = $a >>> 16;
  $8 = ($3 >>> 16) + (Math_imul($2, $6) | 0) | 0;
  $11 = $b >>> 16;
  $12 = Math_imul($11, $1) | 0;
  return ({{{ makeSetTempRet0('(($8 >>> 16) + (Math_imul($11, $6) | 0) | 0) + ((($8 & 65535) + $12 | 0) >>> 16) | 0') }}}, 0 | ($8 + $12 << 16 | $3 & 65535)) | 0;
},
__divdi3__sig: 'iiiii',
__divdi3__asm: true,
__divdi3__deps: ['__udivmoddi4', 'i64Subtract'],
// Signed 64-bit division via __udivmoddi4 on magnitudes, with sign fixup.
__divdi3: function($a$0, $a$1, $b$0, $b$1) {
  $a$0 = $a$0 | 0;
  $a$1 = $a$1 | 0;
  $b$0 = $b$0 | 0;
  $b$1 = $b$1 | 0;
  var $1$0 = 0, $1$1 = 0, $2$0 = 0, $2$1 = 0, $4$0 = 0, $4$1 = 0, $6$0 = 0, $7$0 = 0, $7$1 = 0, $8$0 = 0, $10$0 = 0;
  $1$0 = $a$1 >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
  $1$1 = (($a$1 | 0) < 0 ? -1 : 0) >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
  $2$0 = $b$1 >> 31 | (($b$1 | 0) < 0 ? -1 : 0) << 1;
  $2$1 = (($b$1 | 0) < 0 ?
      -1 : 0) << 1;
  $4$0 = _i64Subtract($1$0 ^ $a$0 | 0, $1$1 ^ $a$1 | 0, $1$0 | 0, $1$1 | 0) | 0;
  $4$1 = {{{ makeGetTempRet0() }}};
  $6$0 = _i64Subtract($2$0 ^ $b$0 | 0, $2$1 ^ $b$1 | 0, $2$0 | 0, $2$1 | 0) | 0;
  $7$0 = $2$0 ^ $1$0;
  $7$1 = $2$1 ^ $1$1;
  $8$0 = ___udivmoddi4($4$0, $4$1, $6$0, {{{ makeGetTempRet0() }}}, 0) | 0;
  $10$0 = _i64Subtract($8$0 ^ $7$0 | 0, {{{ makeGetTempRet0() }}} ^ $7$1 | 0, $7$0 | 0, $7$1 | 0) | 0;
  return $10$0 | 0;
},
__remdi3__sig: 'iiiii',
__remdi3__asm: true,
__remdi3__deps: ['__udivmoddi4', 'i64Subtract'],
// Signed 64-bit remainder via __udivmoddi4 on magnitudes; the remainder is
// written to a 16-byte stack scratch area and sign-corrected afterwards.
__remdi3: function($a$0, $a$1, $b$0, $b$1) {
  $a$0 = $a$0 | 0;
  $a$1 = $a$1 | 0;
  $b$0 = $b$0 | 0;
  $b$1 = $b$1 | 0;
  var $rem = 0, $1$0 = 0, $1$1 = 0, $2$0 = 0, $2$1 = 0, $4$0 = 0, $4$1 = 0, $6$0 = 0, $10$0 = 0, $10$1 = 0, __stackBase__ = 0;
  __stackBase__ = STACKTOP;
  STACKTOP = STACKTOP + 16 | 0;
  $rem = __stackBase__ | 0;
  $1$0 = $a$1 >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
  $1$1 = (($a$1 | 0) < 0 ? -1 : 0) >> 31 | (($a$1 | 0) < 0 ? -1 : 0) << 1;
  $2$0 = $b$1 >> 31 | (($b$1 | 0) < 0 ? -1 : 0) << 1;
  $2$1 = (($b$1 | 0) < 0 ?
      -1 : 0) << 1;
  $4$0 = _i64Subtract($1$0 ^ $a$0 | 0, $1$1 ^ $a$1 | 0, $1$0 | 0, $1$1 | 0) | 0;
  $4$1 = {{{ makeGetTempRet0() }}};
  $6$0 = _i64Subtract($2$0 ^ $b$0 | 0, $2$1 ^ $b$1 | 0, $2$0 | 0, $2$1 | 0) | 0;
  ___udivmoddi4($4$0, $4$1, $6$0, {{{ makeGetTempRet0() }}}, $rem) | 0;
  $10$0 = _i64Subtract(HEAP32[$rem >> 2] ^ $1$0 | 0, HEAP32[$rem + 4 >> 2] ^ $1$1 | 0, $1$0 | 0, $1$1 | 0) | 0;
  $10$1 = {{{ makeGetTempRet0() }}};
  STACKTOP = __stackBase__;
  return ({{{ makeSetTempRet0('$10$1') }}}, $10$0) | 0;
},
__muldi3__sig: 'iiiii',
__muldi3__asm: true,
__muldi3__deps: ['__muldsi3'],
// 64x64 -> 64 bit multiply built from 32-bit partial products.
__muldi3: function($a$0, $a$1, $b$0, $b$1) {
  $a$0 = $a$0 | 0;
  $a$1 = $a$1 | 0;
  $b$0 = $b$0 | 0;
  $b$1 = $b$1 | 0;
  var $x_sroa_0_0_extract_trunc = 0, $y_sroa_0_0_extract_trunc = 0, $1$0 = 0, $1$1 = 0, $2 = 0;
  $x_sroa_0_0_extract_trunc = $a$0;
  $y_sroa_0_0_extract_trunc = $b$0;
  $1$0 = ___muldsi3($x_sroa_0_0_extract_trunc, $y_sroa_0_0_extract_trunc) | 0;
  $1$1 = {{{ makeGetTempRet0() }}};
  $2 = Math_imul($a$1, $y_sroa_0_0_extract_trunc) | 0;
  return ({{{ makeSetTempRet0('((Math_imul($b$1, $x_sroa_0_0_extract_trunc) | 0) + $2 | 0) + $1$1 | $1$1 & 0') }}}, 0 | $1$0 & -1) | 0;
},
__udivdi3__sig: 'iiiii',
__udivdi3__asm: true,
__udivdi3__deps: ['__udivmoddi4'],
// Unsigned 64-bit division (quotient only).
__udivdi3: function($a$0, $a$1, $b$0, $b$1) {
  $a$0 = $a$0 | 0;
  $a$1 = $a$1 | 0;
  $b$0 = $b$0 | 0;
  $b$1 = $b$1 | 0;
  var $1$0 = 0;
  $1$0 = ___udivmoddi4($a$0, $a$1, $b$0, $b$1, 0) | 0;
  return $1$0 | 0;
},
__uremdi3__sig: 'iiiii',
__uremdi3__asm: true,
__uremdi3__deps: ['__udivmoddi4'],
// Unsigned 64-bit remainder, via a 16-byte stack scratch area.
__uremdi3: function($a$0, $a$1, $b$0, $b$1) {
  $a$0 = $a$0 | 0;
  $a$1 = $a$1 | 0;
  $b$0 = $b$0 | 0;
  $b$1 = $b$1 | 0;
  var $rem = 0, __stackBase__ = 0;
  __stackBase__ = STACKTOP;
  STACKTOP = STACKTOP + 16 | 0;
  $rem = __stackBase__ | 0;
  ___udivmoddi4($a$0, $a$1, $b$0, $b$1, $rem) | 0;
  STACKTOP = __stackBase__;
  return ({{{ makeSetTempRet0('HEAP32[$rem + 4 >> 2] | 0') }}}, HEAP32[$rem >> 2] | 0) | 0;
},
__udivmoddi4__sig: 'iiiiii',
__udivmoddi4__asm: true,
__udivmoddi4__deps:
['i64Add', 'i64Subtract', 'llvm_cttz_i32'], __udivmoddi4: function($a$0, $a$1, $b$0, $b$1, $rem) { $a$0 = $a$0 | 0; $a$1 = $a$1 | 0; $b$0 = $b$0 | 0; $b$1 = $b$1 | 0; $rem = $rem | 0; var $n_sroa_0_0_extract_trunc = 0, $n_sroa_1_4_extract_shift$0 = 0, $n_sroa_1_4_extract_trunc = 0, $d_sroa_0_0_extract_trunc = 0, $d_sroa_1_4_extract_shift$0 = 0, $d_sroa_1_4_extract_trunc = 0, $4 = 0, $17 = 0, $37 = 0, $49 = 0, $51 = 0, $57 = 0, $58 = 0, $66 = 0, $78 = 0, $86 = 0, $88 = 0, $89 = 0, $91 = 0, $92 = 0, $95 = 0, $105 = 0, $117 = 0, $119 = 0, $125 = 0, $126 = 0, $130 = 0, $q_sroa_1_1_ph = 0, $q_sroa_0_1_ph = 0, $r_sroa_1_1_ph = 0, $r_sroa_0_1_ph = 0, $sr_1_ph = 0, $d_sroa_0_0_insert_insert99$0 = 0, $d_sroa_0_0_insert_insert99$1 = 0, $137$0 = 0, $137$1 = 0, $carry_0203 = 0, $sr_1202 = 0, $r_sroa_0_1201 = 0, $r_sroa_1_1200 = 0, $q_sroa_0_1199 = 0, $q_sroa_1_1198 = 0, $147 = 0, $149 = 0, $r_sroa_0_0_insert_insert42$0 = 0, $r_sroa_0_0_insert_insert42$1 = 0, $150$1 = 0, $151$0 = 0, $152 = 0, $154$0 = 0, $r_sroa_0_0_extract_trunc = 0, $r_sroa_1_4_extract_trunc = 0, $155 = 0, $carry_0_lcssa$0 = 0, $carry_0_lcssa$1 = 0, $r_sroa_0_1_lcssa = 0, $r_sroa_1_1_lcssa = 0, $q_sroa_0_1_lcssa = 0, $q_sroa_1_1_lcssa = 0, $q_sroa_0_0_insert_ext75$0 = 0, $q_sroa_0_0_insert_ext75$1 = 0, $q_sroa_0_0_insert_insert77$1 = 0, $_0$0 = 0, $_0$1 = 0; $n_sroa_0_0_extract_trunc = $a$0; $n_sroa_1_4_extract_shift$0 = $a$1; $n_sroa_1_4_extract_trunc = $n_sroa_1_4_extract_shift$0; $d_sroa_0_0_extract_trunc = $b$0; $d_sroa_1_4_extract_shift$0 = $b$1; $d_sroa_1_4_extract_trunc = $d_sroa_1_4_extract_shift$0; if (($n_sroa_1_4_extract_trunc | 0) == 0) { $4 = ($rem | 0) != 0; if (($d_sroa_1_4_extract_trunc | 0) == 0) { if ($4) { HEAP32[$rem >> 2] = ($n_sroa_0_0_extract_trunc >>> 0) % ($d_sroa_0_0_extract_trunc >>> 0); HEAP32[$rem + 4 >> 2] = 0; } $_0$1 = 0; $_0$0 = ($n_sroa_0_0_extract_trunc >>> 0) / ($d_sroa_0_0_extract_trunc >>> 0) >>> 0; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } else { if (!$4) { 
$_0$1 = 0; $_0$0 = 0; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } HEAP32[$rem >> 2] = $a$0 & -1; HEAP32[$rem + 4 >> 2] = $a$1 & 0; $_0$1 = 0; $_0$0 = 0; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } } $17 = ($d_sroa_1_4_extract_trunc | 0) == 0; do { if (($d_sroa_0_0_extract_trunc | 0) == 0) { if ($17) { if (($rem | 0) != 0) { HEAP32[$rem >> 2] = ($n_sroa_1_4_extract_trunc >>> 0) % ($d_sroa_0_0_extract_trunc >>> 0); HEAP32[$rem + 4 >> 2] = 0; } $_0$1 = 0; $_0$0 = ($n_sroa_1_4_extract_trunc >>> 0) / ($d_sroa_0_0_extract_trunc >>> 0) >>> 0; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } if (($n_sroa_0_0_extract_trunc | 0) == 0) { if (($rem | 0) != 0) { HEAP32[$rem >> 2] = 0; HEAP32[$rem + 4 >> 2] = ($n_sroa_1_4_extract_trunc >>> 0) % ($d_sroa_1_4_extract_trunc >>> 0); } $_0$1 = 0; $_0$0 = ($n_sroa_1_4_extract_trunc >>> 0) / ($d_sroa_1_4_extract_trunc >>> 0) >>> 0; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } $37 = $d_sroa_1_4_extract_trunc - 1 | 0; if (($37 & $d_sroa_1_4_extract_trunc | 0) == 0) { if (($rem | 0) != 0) { HEAP32[$rem >> 2] = 0 | $a$0 & -1; HEAP32[$rem + 4 >> 2] = $37 & $n_sroa_1_4_extract_trunc | $a$1 & 0; } $_0$1 = 0; $_0$0 = $n_sroa_1_4_extract_trunc >>> ((_llvm_cttz_i32($d_sroa_1_4_extract_trunc | 0) | 0) >>> 0); return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } $49 = Math_clz32($d_sroa_1_4_extract_trunc | 0) | 0; $51 = $49 - (Math_clz32($n_sroa_1_4_extract_trunc | 0) | 0) | 0; if ($51 >>> 0 <= 30) { $57 = $51 + 1 | 0; $58 = 31 - $51 | 0; $sr_1_ph = $57; $r_sroa_0_1_ph = $n_sroa_1_4_extract_trunc << $58 | $n_sroa_0_0_extract_trunc >>> ($57 >>> 0); $r_sroa_1_1_ph = $n_sroa_1_4_extract_trunc >>> ($57 >>> 0); $q_sroa_0_1_ph = 0; $q_sroa_1_1_ph = $n_sroa_0_0_extract_trunc << $58; break; } if (($rem | 0) == 0) { $_0$1 = 0; $_0$0 = 0; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } HEAP32[$rem >> 2] = 0 | $a$0 & -1; HEAP32[$rem + 4 >> 2] = $n_sroa_1_4_extract_shift$0 | $a$1 & 0; $_0$1 = 0; $_0$0 = 0; 
return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } else { if (!$17) { $117 = Math_clz32($d_sroa_1_4_extract_trunc | 0) | 0; $119 = $117 - (Math_clz32($n_sroa_1_4_extract_trunc | 0) | 0) | 0; if ($119 >>> 0 <= 31) { $125 = $119 + 1 | 0; $126 = 31 - $119 | 0; $130 = $119 - 31 >> 31; $sr_1_ph = $125; $r_sroa_0_1_ph = $n_sroa_0_0_extract_trunc >>> ($125 >>> 0) & $130 | $n_sroa_1_4_extract_trunc << $126; $r_sroa_1_1_ph = $n_sroa_1_4_extract_trunc >>> ($125 >>> 0) & $130; $q_sroa_0_1_ph = 0; $q_sroa_1_1_ph = $n_sroa_0_0_extract_trunc << $126; break; } if (($rem | 0) == 0) { $_0$1 = 0; $_0$0 = 0; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } HEAP32[$rem >> 2] = 0 | $a$0 & -1; HEAP32[$rem + 4 >> 2] = $n_sroa_1_4_extract_shift$0 | $a$1 & 0; $_0$1 = 0; $_0$0 = 0; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } $66 = $d_sroa_0_0_extract_trunc - 1 | 0; if (($66 & $d_sroa_0_0_extract_trunc | 0) != 0) { $86 = (Math_clz32($d_sroa_0_0_extract_trunc | 0) | 0) + 33 | 0; $88 = $86 - (Math_clz32($n_sroa_1_4_extract_trunc | 0) | 0) | 0; $89 = 64 - $88 | 0; $91 = 32 - $88 | 0; $92 = $91 >> 31; $95 = $88 - 32 | 0; $105 = $95 >> 31; $sr_1_ph = $88; $r_sroa_0_1_ph = $91 - 1 >> 31 & $n_sroa_1_4_extract_trunc >>> ($95 >>> 0) | ($n_sroa_1_4_extract_trunc << $91 | $n_sroa_0_0_extract_trunc >>> ($88 >>> 0)) & $105; $r_sroa_1_1_ph = $105 & $n_sroa_1_4_extract_trunc >>> ($88 >>> 0); $q_sroa_0_1_ph = $n_sroa_0_0_extract_trunc << $89 & $92; $q_sroa_1_1_ph = ($n_sroa_1_4_extract_trunc << $89 | $n_sroa_0_0_extract_trunc >>> ($95 >>> 0)) & $92 | $n_sroa_0_0_extract_trunc << $91 & $88 - 33 >> 31; break; } if (($rem | 0) != 0) { HEAP32[$rem >> 2] = $66 & $n_sroa_0_0_extract_trunc; HEAP32[$rem + 4 >> 2] = 0; } if (($d_sroa_0_0_extract_trunc | 0) == 1) { $_0$1 = $n_sroa_1_4_extract_shift$0 | $a$1 & 0; $_0$0 = 0 | $a$0 & -1; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } else { $78 = _llvm_cttz_i32($d_sroa_0_0_extract_trunc | 0) | 0; $_0$1 = 0 | $n_sroa_1_4_extract_trunc >>> 
($78 >>> 0); $_0$0 = $n_sroa_1_4_extract_trunc << 32 - $78 | $n_sroa_0_0_extract_trunc >>> ($78 >>> 0) | 0; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; } } } while (0); if (($sr_1_ph | 0) == 0) { $q_sroa_1_1_lcssa = $q_sroa_1_1_ph; $q_sroa_0_1_lcssa = $q_sroa_0_1_ph; $r_sroa_1_1_lcssa = $r_sroa_1_1_ph; $r_sroa_0_1_lcssa = $r_sroa_0_1_ph; $carry_0_lcssa$1 = 0; $carry_0_lcssa$0 = 0; } else { $d_sroa_0_0_insert_insert99$0 = 0 | $b$0 & -1; $d_sroa_0_0_insert_insert99$1 = $d_sroa_1_4_extract_shift$0 | $b$1 & 0; $137$0 = _i64Add($d_sroa_0_0_insert_insert99$0 | 0, $d_sroa_0_0_insert_insert99$1 | 0, -1, -1) | 0; $137$1 = {{{ makeGetTempRet0() }}}; $q_sroa_1_1198 = $q_sroa_1_1_ph; $q_sroa_0_1199 = $q_sroa_0_1_ph; $r_sroa_1_1200 = $r_sroa_1_1_ph; $r_sroa_0_1201 = $r_sroa_0_1_ph; $sr_1202 = $sr_1_ph; $carry_0203 = 0; while (1) { $147 = $q_sroa_0_1199 >>> 31 | $q_sroa_1_1198 << 1; $149 = $carry_0203 | $q_sroa_0_1199 << 1; $r_sroa_0_0_insert_insert42$0 = 0 | ($r_sroa_0_1201 << 1 | $q_sroa_1_1198 >>> 31); $r_sroa_0_0_insert_insert42$1 = $r_sroa_0_1201 >>> 31 | $r_sroa_1_1200 << 1 | 0; _i64Subtract($137$0 | 0, $137$1 | 0, $r_sroa_0_0_insert_insert42$0 | 0, $r_sroa_0_0_insert_insert42$1 | 0) | 0; $150$1 = {{{ makeGetTempRet0() }}}; $151$0 = $150$1 >> 31 | (($150$1 | 0) < 0 ? -1 : 0) << 1; $152 = $151$0 & 1; $154$0 = _i64Subtract($r_sroa_0_0_insert_insert42$0 | 0, $r_sroa_0_0_insert_insert42$1 | 0, $151$0 & $d_sroa_0_0_insert_insert99$0 | 0, ((($150$1 | 0) < 0 ? -1 : 0) >> 31 | (($150$1 | 0) < 0 ? 
-1 : 0) << 1) & $d_sroa_0_0_insert_insert99$1 | 0) | 0; $r_sroa_0_0_extract_trunc = $154$0; $r_sroa_1_4_extract_trunc = {{{ makeGetTempRet0() }}}; $155 = $sr_1202 - 1 | 0; if (($155 | 0) == 0) { break; } else { $q_sroa_1_1198 = $147; $q_sroa_0_1199 = $149; $r_sroa_1_1200 = $r_sroa_1_4_extract_trunc; $r_sroa_0_1201 = $r_sroa_0_0_extract_trunc; $sr_1202 = $155; $carry_0203 = $152; } } $q_sroa_1_1_lcssa = $147; $q_sroa_0_1_lcssa = $149; $r_sroa_1_1_lcssa = $r_sroa_1_4_extract_trunc; $r_sroa_0_1_lcssa = $r_sroa_0_0_extract_trunc; $carry_0_lcssa$1 = 0; $carry_0_lcssa$0 = $152; } $q_sroa_0_0_insert_ext75$0 = $q_sroa_0_1_lcssa; $q_sroa_0_0_insert_ext75$1 = 0; $q_sroa_0_0_insert_insert77$1 = $q_sroa_1_1_lcssa | $q_sroa_0_0_insert_ext75$1; if (($rem | 0) != 0) { HEAP32[$rem >> 2] = 0 | $r_sroa_0_1_lcssa; HEAP32[$rem + 4 >> 2] = $r_sroa_1_1_lcssa | 0; } $_0$1 = (0 | $q_sroa_0_0_insert_ext75$0) >>> 31 | $q_sroa_0_0_insert_insert77$1 << 1 | ($q_sroa_0_0_insert_ext75$1 << 1 | $q_sroa_0_0_insert_ext75$0 >>> 31) & 0 | $carry_0_lcssa$1; $_0$0 = ($q_sroa_0_0_insert_ext75$0 << 1 | 0 >>> 31) & -2 | $carry_0_lcssa$0; return ({{{ makeSetTempRet0('$_0$1') }}}, $_0$0) | 0; }, // ======================================================================= }; function autoAddDeps(object, name) { name = [name]; for (var item in object) { if (item.substr(-6) != '__deps') { if (!object[item + '__deps']) { object[item + '__deps'] = name; } else { object[item + '__deps'].push(name[0]); // add to existing list } } } }
sin: 'Math_sin', sinf: 'Math_sin', sinl: 'Math_sin',
network_qt5.py
from PyQt5.QtCore import QDateTime, Qt, QTimer, pyqtSignal, QObject
from PyQt5.QtWidgets import (QDialog, QApplication, QLabel, QCheckBox, QHBoxLayout, QVBoxLayout, QPushButton, QLineEdit, QSpinBox, QFormLayout, QGridLayout, QStyleFactory)
import os
import sys
import json
import time
import threading
import webbrowser
from GUI.modules.NetworkTest import NetworkTest as ntc

# Location of the JSON file that holds the default test settings.
FilePath = os.path.join(".", "GUI", "defaults.json")


def loadDefaults(FilePath):
    """Load and return the default-settings dict from the given JSON file."""
    with open(FilePath) as data:
        defaults = json.load(data)
    return defaults


class Screen(QDialog):
    """Main dialog of the network-test GUI.

    Holds the current test settings as instance attributes; the form
    widgets are initialised from (and written back to) these values.
    """

    def __init__(self, defaults, parent=None):
        super(Screen, self).__init__(parent)
        # Copy the default settings onto the instance.
        self.doPingTest = defaults["doPingTest"]
        self.doSpeedTest = defaults["doSpeedTest"]
        self.interval = defaults["interval"]
        self.ping_target = defaults["ping_target"]
        self.threads = defaults["threads"]
        self.path = defaults["path"]
        self.ping_file_name = defaults["ping_file_name"]
        self.speed_test_file_name = defaults["speed_test_file_name"]
        # NOTE(review): __init__ continues past this chunk boundary
        # (self.clear / self.test assignments and thread start-up).
        # Run the network test driver on a background daemon thread so the
        # Qt event loop stays responsive; daemon=True lets the process exit
        # without joining the thread.
        self.testThread = threading.Thread(name='runTest', target=self.test.run_network_test, args=())
        self.testThread.daemon = True
        self.testThread.start()
        self.generateScreen()

    def createParameterLayout(self):
        """Build the form layout holding all test-parameter widgets.

        The widgets are kept on self so startTest() can read them back.
        """
        self.pingTargetTextBox = QLineEdit(self.ping_target)
        self.pingIntervalSpinBox = QSpinBox()
        self.pingIntervalSpinBox.setValue(self.interval)
        self.threadsSpinBox = QSpinBox()
        self.threadsSpinBox.setValue(self.threads)
        self.resultFilePathTextBox = QLineEdit(self.path)
        self.pingResultFileNameTextBox = QLineEdit(self.ping_file_name)
        self.speedResultFileNameTextBox = QLineEdit(self.speed_test_file_name)
        self.clearOldResultCheckBox = QCheckBox("Clear Old Results?")
        self.clearOldResultCheckBox.setChecked(self.clear)
        parameterLayout = QFormLayout()
        parameterLayout.addRow("Ping Target: ", self.pingTargetTextBox)
        parameterLayout.addRow("Ping Interval: ", self.pingIntervalSpinBox)
        parameterLayout.addRow("Threads to use: ", self.threadsSpinBox)
        parameterLayout.addRow("Results File Path: ", self.resultFilePathTextBox)
        parameterLayout.addRow("Ping Test Result File Name: ", self.pingResultFileNameTextBox)
        parameterLayout.addRow("Speed Test Result File Name: ", self.speedResultFileNameTextBox)
        parameterLayout.addRow(self.clearOldResultCheckBox)
        return parameterLayout

    def createTestLayout(self):
        """Build the row of checkboxes selecting which tests to run."""
        testLabel = QLabel("Select the test you want to perform: ")
        self.pingTestCheckbox = QCheckBox("&Ping Test")
        self.pingTestCheckbox.setChecked(self.doPingTest)
        self.speedTestCheckbox = QCheckBox("&Speed Test")
        self.speedTestCheckbox.setChecked(self.doSpeedTest)
        testLayout = QHBoxLayout()
        testLayout.addWidget(testLabel)
        testLayout.addWidget(self.pingTestCheckbox)
        testLayout.addWidget(self.speedTestCheckbox)
        return testLayout

    # NOTE(review): method name has a typo ("creat"); kept because
    # generateScreen() calls it under this exact name.
    def creatActionButtonsLayout(self):
        """Build the Start/End test button row."""
        startTestButton = QPushButton("Start Test")
        startTestButton.clicked.connect(self.startTest)
        endTestButton = QPushButton("End Test")
        endTestButton.clicked.connect(self.endTest)
        actionButtonsLayout = QHBoxLayout()
        actionButtonsLayout.addWidget(startTestButton)
        actionButtonsLayout.addWidget(endTestButton)
        return actionButtonsLayout

    def createRightActionPanel(self):
        """Build the right-hand column with the graph actions."""
        generateGraphButton = QPushButton("Generate Graph")
        generateGraphButton.clicked.connect(self.generateGraph)
        viewGraphButton = QPushButton("View Graph")
        viewGraphButton.clicked.connect(self.viewGraph)
        rightActionLayout = QVBoxLayout()
        rightActionLayout.addWidget(generateGraphButton)
        rightActionLayout.addWidget(viewGraphButton)
        return rightActionLayout

    def generateScreen(self):
        """Assemble all sub-layouts into the dialog's main grid."""
        testLayout = self.createTestLayout()
        actionButtonsLayout = self.creatActionButtonsLayout()
        parameterLayout = self.createParameterLayout()
        rightActionLayout = self.createRightActionPanel()
        mainLayout = QGridLayout()
        # (layout, row, column, rowSpan, columnSpan)
        mainLayout.addLayout(parameterLayout, 0, 0, 7, 2)
        mainLayout.addLayout(testLayout, 8, 0, 1, 2)
        mainLayout.addLayout(actionButtonsLayout, 9, 0, 1, 2)
        mainLayout.addLayout(rightActionLayout, 0, 2, 2, 1)
        self.setLayout(mainLayout)
        self.setWindowTitle("Network Test")
        QApplication.setStyle(QStyleFactory.create('Fusion'))

    # start test on button press.
Read form fields, update variables and start test def startTest(self): print("Test Started") self.doPingTest = self.pingTestCheckbox.isChecked() self.doSpeedTest = self.speedTestCheckbox.isChecked() self.interval = self.pingIntervalSpinBox.value() self.ping_target = self.pingTargetTextBox.text() self.threads = self.threadsSpinBox.value() self.path = self.resultFilePathTextBox.text() self.ping_file_name = self.pingResultFileNameTextBox.text() self.speed_test_file_name = self.speedResultFileNameTextBox.text() self.clear = self.clearOldResultCheckBox.isChecked() updatedVariables = { "doPingTest": self.doPingTest, "doSpeedTest": self.doSpeedTest, "interval": self.interval, "ping_target": self.ping_target, "threads": self.threads, "path": self.path, "ping_file_name": self.ping_file_name, "speed_test_file_name": self.speed_test_file_name, "clear": self.clear } self.test.updateTestVariables(updatedVariables) self.test.startTest() def endTest(self): self.test.endTest() print("Test Ended") def generateGraph(self): print("Generate Graph Started") self.test.generate_and_save_all_plots() def viewGraph(self): print("Opening Graph") webbrowser.open('file://' + os.path.realpath(os.path.join(self.path, "webpage", "index.html"))) def testCallback(self, msg): # print(msg) pass app = QApplication(sys.argv) screen = Screen(loadDefaults(FilePath)) screen.show() sys.exit(app.exec_())
        # Whether old result files should be cleared before a new run
        # (drives the "Clear Old Results?" checkbox).
        self.clear = defaults["clear"]
        # NetworkTest driver instance; reports progress via testCallback.
        self.test = ntc(defaults, self.testCallback)
index.js
const AWS = require("aws-sdk"); const stepFunctions = new AWS.StepFunctions(); exports.handler = async (event, context) => { let jobId; try { jobId = JSON.parse(event.body).jobId } catch(error) { console.log(error) return getResponse(400, 'Invalid Job-Id provided') } const params = { stateMachineArn: process.env.STEP_FUNCTION_ARN, name: jobId, input: JSON.stringify({"jobId": jobId}) }; try { await stepFunctions.startExecution(params).promise() return getResponse(200, 'Successfully started Job-Execution'); } catch (error){ console.log(error) if (error.code == 'ExecutionAlreadyExists') { return getResponse(400, 'Execution already exists'); } else { return getResponse(500, 'Problem while starting Job-Execution'); } } } function
(statusCode, message) { return { statusCode: statusCode, body: JSON.stringify({message: message}) }; }
getResponse
script.js
var lista = []
var res = document.querySelector('div#res')

// Read the typed number, validate it (1..100, no duplicates) and add it to
// the list and to the <select> element.
function adicionar(){
    var numero = document.querySelector('input#txtnum')
    var num = Number(numero.value)
    var tab = document.querySelector('select#txttab')
    if (num < 1 || num > 100){ // check the value is in the valid range (1 to 100)
        window.alert('Digite um valor válido!')
        numero.value = null
    } else if (lista.includes(num)){ // reject values that were already entered
        window.alert('Valor já adicionado! Não repita os valores!')
        numero.value = null
    } else {
        lista.push(num)
        var item = document.createElement('option')
        item.text = `Valor ${num} adicionado`
        numero.value = null
        tab.appendChild(item)
        res.innerHTML = ''
    }
}

// Render the statistics (count, max, min, sum, average) into div#res.
function calcular(){
    res.innerHTML = `<p>Ao todo, temos ${lista.length} número(s) cadastrado(s)</p> <p>O maior valor informado foi ${Math.max.apply(null, lista)}</p> <p>O menor valor informado foi ${Math.min.apply(null, lista)}</p> <p>A soma de todos os valores é ${somaValores(lista)}</p> <p>A média dos valores digitados é ${media(lista)}</p>`
}

// Sum of all values in the list.
function somaValores(list){
    var soma = 0
    // FIX: the original used "for (c in list)", which iterates indices as
    // strings and leaks "c" as an implicit global; for..of iterates values.
    for (var valor of list){
        soma += valor
    }
    return soma
}

// Average of the list values (NaN for an empty list — original behaviour kept).
function media(list){
    var soma = somaValores(list)
    var quant = list.length
    return soma / quant
}
endpoints.go
package generate import ( "github.com/gobuffalo/packr" "github.com/lukasjarosch/godin/internal/template" "github.com/vetcher/go-astra/types" "github.com/sirupsen/logrus" "github.com/lukasjarosch/godin/internal/parse" "github.com/pkg/errors" "fmt" "github.com/spf13/viper" ) type Endpoints struct { BaseGenerator } func NewEndpoints(box packr.Box, serviceInterface *types.Interface, ctx template.Context, options ...Option) *Endpoints { defaults := &Options{ Context: ctx, Overwrite: true, IsGoSource: false, // FIXME: set to 'true' Template: "endpoints", TargetFile: "internal/service/endpoint/endpoints.go", } for _, opt := range options { opt(defaults) } return &Endpoints{ BaseGenerator{ box: box, iface: serviceInterface, opts: defaults, }, } } func (e *Endpoints) GenerateMissing() error { implementation := parse.NewEndpointsParser(e.opts.TargetFile, e.iface) if err := implementation.Parse(); err != nil { return errors.Wrap(err, "Endpoints.Parse") } if len(implementation.MissingEndpoints) > 0 { for _, missingEndpoint := range implementation.MissingEndpoints { // we miss the service name at this point since 'MethodFromType' cannot extract that m := template.MethodFromType(missingEndpoint) m.ServiceName = viper.GetString("service.name") tpl := template.NewPartial("endpoint", true) data, err := tpl.Render(e.box, m) if err != nil
writer := template.NewFileAppendWriter(e.opts.TargetFile, data) if err := writer.Write(); err != nil { return errors.Wrap(err, fmt.Sprintf("failed to append-write to %s", e.TargetPath())) } logrus.Infof("added missing endpoint to %s: %s", e.opts.TargetFile, missingEndpoint) } } return nil } func (e *Endpoints) Update() error { if e.TargetExists() { return e.GenerateMissing() } return e.GenerateFull() }
{ return errors.Wrap(err, "failed to render partial") }
convert-to-spaces.pipe.ts
import { Pipe, PipeTransform } from '@angular/core';

/**
 * Replaces every occurrence of `character` in `value` with a space.
 */
@Pipe({
  name: 'convertToSpaces'
})
export class ConvertToSpacesPipe implements PipeTransform {
  transform(value: string, character: string): string {
    // Guard: an empty search string would otherwise split on every character.
    if (!character) {
      return value;
    }
    // FIX: String.replace with a *string* pattern only replaces the FIRST
    // occurrence; split/join replaces all of them without regex escaping.
    return value.split(character).join(' ');
  }
}
test_status_update.py
"""Unit tests for reviewboard.reviews.models.base_comment.StatusUpdate.""" from __future__ import unicode_literals from django.contrib.auth.models import AnonymousUser, Permission, User from djblets.testing.decorators import add_fixtures from reviewboard.accounts.models import LocalSiteProfile from reviewboard.testing import TestCase class StatusUpdateTests(TestCase): """Unit tests for reviewboard.reviews.models.base_comment.StatusUpdate.""" fixtures = ['test_users'] def test_is_mutable_by_with_anonymous(self): """Testing StatusUpdate.is_mutable_by with anonymous user""" review_request = self.create_review_request() status_update = self.create_status_update(review_request) self.assertFalse(status_update.is_mutable_by(AnonymousUser())) def test_is_mutable_by_with_owner(self): """Testing StatusUpdate.is_mutable_by with owner""" review_request = self.create_review_request() status_update = self.create_status_update(review_request) self.assertTrue(status_update.is_mutable_by(status_update.user)) def test_is_mutable_by_with_other_user(self): """Testing StatusUpdate.is_mutable_by with other user""" other_user = User.objects.create(username='other-user') review_request = self.create_review_request() status_update = self.create_status_update(review_request) self.assertFalse(status_update.is_mutable_by(other_user)) def
(self): """Testing StatusUpdate.is_mutable_by with other user with change_statusupdate permission """ other_user = User.objects.create(username='other-user') other_user.user_permissions.add( Permission.objects.get(codename='change_statusupdate')) review_request = self.create_review_request() status_update = self.create_status_update(review_request) self.assertTrue(status_update.is_mutable_by(other_user)) @add_fixtures(['test_site']) def test_is_mutable_by_with_other_user_with_perm_same_local_site(self): """Testing StatusUpdate.is_mutable_by with other user on same LocalSite with change_statusupdate permission """ review_request = self.create_review_request(with_local_site=True) status_update = self.create_status_update(review_request) other_user = User.objects.create(username='other-user') site = review_request.local_site site.users.add(other_user) site_profile = other_user.get_site_profile(site) site_profile.permissions = { 'reviews.change_statusupdate': True, } site_profile.save(update_fields=('permissions',)) self.assertTrue(status_update.is_mutable_by(other_user))
test_is_mutable_by_with_other_user_and_can_change_status_perm
derive_object.rs
use proc_macro2::TokenStream; use quote::quote; use syn::{self, Data, Fields}; use crate::util; pub fn build_derive_object(ast: syn::DeriveInput, is_internal: bool) -> TokenStream { let struct_fields = match ast.data { Data::Struct(data) => match data.fields { Fields::Named(fields) => fields.named, _ => { panic!("#[derive(GraphQLObject)] may only be used on regular structs with fields"); } }, _ => { panic!("#[derive(GraphlQLObject)] may only be applied to structs, not to enums"); } }; // Parse attributes. let attrs = match util::ObjectAttributes::from_attrs(&ast.attrs) { Ok(a) => a, Err(e) => { panic!("Invalid #[graphql(...)] attribute: {}", e); } }; if attrs.interfaces.len() > 0 { panic!("Invalid #[graphql(...)] attribute 'interfaces': #[derive(GraphQLObject) does not support 'interfaces'"); } let ident = &ast.ident; let name = attrs.name.unwrap_or_else(|| ident.to_string()); let fields = struct_fields.into_iter().filter_map(|field| { let field_attrs = match util::FieldAttributes::from_attrs( field.attrs, util::FieldAttributeParseMode::Object, ) { Ok(attrs) => attrs, Err(e) => panic!("Invalid #[graphql] attribute: \n{}", e), }; if field_attrs.skip { None } else { let field_name = field.ident.unwrap(); let name = field_attrs .name .clone() .unwrap_or_else(|| util::to_camel_case(&field_name.to_string())); let resolver_code = quote!( &self . #field_name ); Some(util::GraphQLTypeDefinitionField { name, _type: field.ty, args: Vec::new(), description: field_attrs.description, deprecation: field_attrs.deprecation, resolver_code, }) } }); let definition = util::GraphQLTypeDefiniton { name, _type: syn::parse_str(&ast.ident.to_string()).unwrap(), context: attrs.context, scalar: attrs.scalar, description: attrs.description, fields: fields.collect(), generics: ast.generics, interfaces: None, include_type_generics: true, generic_scalar: true, }; let juniper_crate_name = if is_internal
else { "juniper" }; definition.into_tokens(juniper_crate_name) }
{ "crate" }
inference.py
import argparse
import os

import cv2
import librosa
import numpy as np
import soundfile as sf
import torch
from tqdm import tqdm

from lib import dataset
from lib import nets
from lib import spec_utils


class VocalRemover(object):
    """Runs a trained spectrogram model over a magnitude spectrogram in
    fixed-size windows and stitches the per-window predictions together.
    """

    def __init__(self, model, device, window_size):
        self.model = model
        # Number of frames the model crops from each side of a window.
        self.offset = model.offset
        self.device = device
        self.window_size = window_size

    def _execute(self, X_mag_pad, roi_size, n_window):
        """Slide the model across the padded magnitude spectrogram and
        concatenate the per-window predictions on the frame axis.
        """
        self.model.eval()
        with torch.no_grad():
            preds = []
            for i in tqdm(range(n_window)):
                start = i * roi_size
                X_mag_window = X_mag_pad[None, :, :, start:start + self.window_size]
                X_mag_window = torch.from_numpy(X_mag_window).to(self.device)

                pred = self.model.predict(X_mag_window)

                pred = pred.detach().cpu().numpy()
                preds.append(pred[0])

            pred = np.concatenate(preds, axis=2)

        return pred

    def _pad_and_execute(self, X_mag_pre, pad_l, pad_r, roi_size, n_window):
        """Zero-pad the frame axis and run the windowed model.

        Extracted because inference() and inference_tta() previously
        duplicated this pad-then-execute sequence.
        """
        X_mag_pad = np.pad(
            X_mag_pre, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant')
        return self._execute(X_mag_pad, roi_size, n_window)

    def preprocess(self, X_spec):
        """Split a complex spectrogram into magnitude and phase arrays."""
        X_mag = np.abs(X_spec)
        X_phase = np.angle(X_spec)
        return X_mag, X_phase

    def inference(self, X_spec):
        """Predict a magnitude mask output for a complex spectrogram.

        Returns (prediction, input magnitude, unit-magnitude phase array).
        """
        X_mag, X_phase = self.preprocess(X_spec)

        # Normalise by the peak magnitude; rescaled back on return.
        # NOTE(review): all-silent input (coef == 0) would divide by zero.
        coef = X_mag.max()
        X_mag_pre = X_mag / coef

        n_frame = X_mag_pre.shape[2]
        pad_l, pad_r, roi_size = dataset.make_padding(
            n_frame, self.window_size, self.offset)
        n_window = int(np.ceil(n_frame / roi_size))

        pred = self._pad_and_execute(X_mag_pre, pad_l, pad_r, roi_size, n_window)
        pred = pred[:, :, :n_frame]

        return pred * coef, X_mag, np.exp(1.j * X_phase)

    def inference_tta(self, X_spec):
        """Like inference(), but averages in a second pass whose windows are
        shifted by half a window (simple test-time augmentation).
        """
        X_mag, X_phase = self.preprocess(X_spec)

        coef = X_mag.max()
        X_mag_pre = X_mag / coef

        n_frame = X_mag_pre.shape[2]
        pad_l, pad_r, roi_size = dataset.make_padding(
            n_frame, self.window_size, self.offset)
        n_window = int(np.ceil(n_frame / roi_size))

        pred = self._pad_and_execute(X_mag_pre, pad_l, pad_r, roi_size, n_window)
        pred = pred[:, :, :n_frame]

        # Second pass: shift the window grid by half a window and re-crop.
        pred_tta = self._pad_and_execute(
            X_mag_pre,
            pad_l + roi_size // 2, pad_r + roi_size // 2,
            roi_size, n_window + 1)
        pred_tta = pred_tta[:, :, roi_size // 2:]
        pred_tta = pred_tta[:, :, :n_frame]

        return (pred + pred_tta) * 0.5 * coef, X_mag, np.exp(1.j * X_phase)


def main():
    """CLI: separate a mixture file into instruments and vocals."""
    p = argparse.ArgumentParser()
    p.add_argument('--gpu', '-g', type=int, default=-1)
    p.add_argument('--pretrained_model', '-P', type=str, default='models/baseline.pth')
    p.add_argument('--input', '-i', required=True)
    p.add_argument('--sr', '-r', type=int, default=44100)
    p.add_argument('--n_fft', '-f', type=int, default=2048)
    p.add_argument('--hop_length', '-l', type=int, default=1024)
    p.add_argument('--window_size', '-w', type=int, default=512)
    p.add_argument('--output_image', '-I', action='store_true')
    p.add_argument('--postprocess', '-p', action='store_true')
    p.add_argument('--tta', '-t', action='store_true')
    args = p.parse_args()

    print('loading model...', end=' ')
    device = torch.device('cpu')
    model = nets.CascadedASPPNet(args.n_fft)
    model.load_state_dict(torch.load(args.pretrained_model, map_location=device))
    if torch.cuda.is_available() and args.gpu >= 0:
        device = torch.device('cuda:{}'.format(args.gpu))
        model.to(device)
    print('done')

    print('loading wave source...', end=' ')
    X, sr = librosa.load(
        args.input, args.sr, False, dtype=np.float32, res_type='kaiser_fast')
    basename = os.path.splitext(os.path.basename(args.input))[0]
    print('done')

    # Duplicate a mono source into two channels.
    if X.ndim == 1:
        X = np.asarray([X, X])

    print('stft of wave source...', end=' ')
    X = spec_utils.wave_to_spectrogram(X, args.hop_length, args.n_fft)
    print('done')

    vr = VocalRemover(model, device, args.window_size)

    if args.tta:
        pred, X_mag, X_phase = vr.inference_tta(X)
    else:
        pred, X_mag, X_phase = vr.inference(X)

    if args.postprocess:
        print('post processing...', end=' ')
        pred_inv = np.clip(X_mag - pred, 0, np.inf)
        pred = spec_utils.mask_silence(pred, pred_inv)
        print('done')

    print('inverse stft of instruments...', end=' ')
    y_spec = pred * X_phase
    wave = spec_utils.spectrogram_to_wave(y_spec, hop_length=args.hop_length)
    print('done')
    sf.write('{}_Instruments.wav'.format(basename), wave.T, sr)

    print('inverse stft of vocals...', end=' ')
    # Vocals are the residual of the instrumental prediction.
    v_spec = np.clip(X_mag - pred, 0, np.inf) * X_phase
    wave = spec_utils.spectrogram_to_wave(v_spec, hop_length=args.hop_length)
    print('done')
    sf.write('{}_Vocals.wav'.format(basename), wave.T, sr)

    if args.output_image:
        with open('{}_Instruments.jpg'.format(basename), mode='wb') as f:
            image = spec_utils.spectrogram_to_image(y_spec)
            _, bin_image = cv2.imencode('.jpg', image)
            bin_image.tofile(f)
        with open('{}_Vocals.jpg'.format(basename), mode='wb') as f:
            image = spec_utils.spectrogram_to_image(v_spec)
            _, bin_image = cv2.imencode('.jpg', image)
            bin_image.tofile(f)


if __name__ == '__main__':
    main()