file_name | prefix | suffix | middle
---|---|---|---|
profile.js | $(document).ready(function () {
// Copy
$('.copy-btn').on('click', function () {
copyKeyToClipboard($(this).parent().data('client-key'));
});
// New client
$('#new-client-btn').on('click', function () {
showNewClientForm();
});
// Reset API key
$('.reset-key-btn').on('click', function () {
var clientId = $(this).parent().parent().data('client-id');
showPrompt('Are you sure?', 'If you reset this API key, you must change it in your application.', function () {
resetClientAPIKey.call(this, clientId);
}.bind(this));
});
// Delete client
$('.delete-client-btn').on('click', function () {
var clientId = $(this).parent().parent().data('client-id');
showPrompt('Are you sure?', 'If you delete this client, our service will become unavailable to you through its API key. Also, you don\'t get your money back.', function () {
deleteClient(clientId);
});
});
// Edit client name
$('.edit-client-btn').on('click', function () {
var clientId = $(this).parent().parent().data('client-id');
showEditClientNameForm(clientId);
});
$('.change-client-name-form .btn').on('click', function () {
submitEditClientNameForm($('.change-client-name-form .client-id-field').val(), $('.change-client-name-form .client-name-field').val());
});
});
function showEditClientNameForm(clientId) {
var data = {
clientId: clientId
};
$.ajax({
url: '/Profile/GetClientName',
type: 'GET',
data: data,
success: function (result) {
$('.change-client-name-form .client-name-field').val(result.clientName);
$('.change-client-name-form .client-id-field').val(clientId);
$('.fader').show();
$('.change-client-name-form').show();
},
error: function (xhr, ajaxOptions, thrownError) {
alert('failure'); // TODO: Show toast message.
}
});
}
function submitEditClientNameForm(clientId, newClientName) {
var data = {
clientId: clientId,
newName: newClientName
};
$.ajax({
url: '/Profile/UpdateClientName',
type: 'POST',
data: data,
success: function (result) {
window.location.reload();
},
error: function (xhr, ajaxOptions, thrownError) {
alert('failure'); // TODO: Show toast message.
}
});
}
function deleteClient(clientId) {
var data = {
clientId: clientId
};
$.ajax({
url: '/Profile/DeleteClient',
type: 'POST',
data: data,
success: function (result) {
closePrompt();
window.location.reload();
}.bind(this),
error: function (xhr, ajaxOptions, thrownError) {
closePrompt();
alert('failure'); // TODO: Show toast message.
}
});
}
function resetClientAPIKey(clientId) {
var data = {
clientId: clientId
};
$.ajax({
url: '/Profile/ResetApiKey',
type: 'POST',
data: data,
success: function (result) {
closePrompt();
$(this).siblings('h4').find('span').html(result.newKey);
}.bind(this),
error: function (xhr, ajaxOptions, thrownError) {
alert('failure'); // TODO: Show toast message.
closePrompt();
}
});
}
function copyKeyToClipboard(key) {
if (window.clipboardData && window.clipboardData.setData) {
// IE-specific code path that prevents the textarea from being shown while the dialog is visible.
return window.clipboardData.setData("Text", key);
} else if (document.queryCommandSupported && document.queryCommandSupported("copy")) {
var textarea = document.createElement("textarea");
textarea.textContent = key;
textarea.style.position = "fixed";// Prevent scrolling to bottom of page in MS Edge.
document.body.appendChild(textarea);
textarea.select();
try {
    var succeeded = document.execCommand("copy");
    if (succeeded) {
        alert('Copied to clipboard!'); // TODO: Swap with showToast.
    }
    return succeeded;
} catch (ex) {
    console.warn("Copy to clipboard failed.", ex); // TODO: Swap with showToast.
    alert('Your browser doesn\'t support clipboard manipulation.');
    return false;
} finally {
    document.body.removeChild(textarea);
}
}
}
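// Hedged alternative (not part of the original file): where the asynchronous
// Clipboard API is available, the textarea/execCommand fallback above can be
// skipped; older browsers fall through to copyKeyToClipboard.
function copyKeyToClipboardAsync(key) {
    if (navigator.clipboard && navigator.clipboard.writeText) {
        return navigator.clipboard.writeText(key).then(function () {
            alert('Copied to clipboard!'); // TODO: Swap with showToast.
        });
    }
    return copyKeyToClipboard(key);
}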
function showNewClientForm() {
$('.fader').show();
$('.new-client-form').show();
}
edging_machine.service.spec.ts | import { TestBed } from '@angular/core/testing';
import { CncMachineService } from './cnc_machine.service';
describe('CncMachineService', () => {
beforeEach(() => TestBed.configureTestingModule({}));
it('should be created', () => {
const service: CncMachineService = TestBed.get(CncMachineService);
expect(service).toBeTruthy();
});
});
coherence_inherent.rs | // Tests that methods that implement a trait cannot be invoked
// unless the trait is imported.
mod Lib {
pub trait TheTrait {
fn the_fn(&self);
}
pub struct TheStruct;
impl TheTrait for TheStruct {
fn the_fn(&self) {}
}
}
mod Import {
// Trait is in scope here:
use Lib::TheStruct;
use Lib::TheTrait;
fn call_the_fn(s: &TheStruct) {
s.the_fn();
}
}
mod NoImport {
// Trait is not in scope here:
use Lib::TheStruct;
fn call_the_fn(s: &TheStruct) {
s.the_fn(); //~ ERROR no method named `the_fn` found
}
}
fn main() {}
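// Hedged note (not part of the original test): adding `use Lib::TheTrait;`
// inside `NoImport` would bring the trait into scope and make `s.the_fn()`
// compile, which is exactly what the `Import` module above demonstrates.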
image-uploader.tsx | import React, { FC, useLayoutEffect, useRef, useState } from 'react'
import { AddOutline } from 'antd-mobile-icons'
import { mergeProps } from '../../utils/with-default-props'
import ImageViewer from '../image-viewer'
import PreviewItem from './preview-item'
import { useNewControllableValue } from '../../utils/use-controllable-value'
import { usePersistFn } from 'ahooks'
import Space from '../space'
import { convertPx } from '../../utils/convert-px'
export type TaskStatus = 'pending' | 'fail'
export interface FileItem {
url: string
}
type Task = {
id: number
url?: string
file: File
status: TaskStatus
}
export type ImageUploaderProps = {
defaultValue?: FileItem[]
value?: FileItem[]
onChange?: (fileList: FileItem[]) => void
accept?: string
multiple?: boolean
maxCount?: number
onCountExceed?: (exceed: number) => void
disableUpload?: boolean
showUpload?: boolean
deletable?: boolean
capture?: boolean | string
onPreview?: (index: number) => void
beforeUpload?: (file: File[]) => Promise<File[]> | File[]
upload: (file: File) => Promise<FileItem>
}
const classPrefix = `adm-image-uploader`
const defaultProps = {
disableUpload: false,
deletable: true,
showUpload: true,
multiple: false,
maxCount: 0,
defaultValue: [] as FileItem[],
accept: 'image/*',
}
export const ImageUploader: FC<ImageUploaderProps> = p => {
  const props = mergeProps(defaultProps, p)
const [value, setValue] = useNewControllableValue(props)
const updateValue = usePersistFn(
(updater: (prev: FileItem[]) => FileItem[]) => {
setValue(updater(value))
}
)
const [tasks, setTasks] = useState<Task[]>([])
useLayoutEffect(() => {
setTasks(prev =>
prev.filter(task => {
if (task.url === undefined) return true
return !value.some(fileItem => fileItem.url === task.url)
})
)
}, [value])
const idCountRef = useRef(0)
const { maxCount, onPreview } = props
async function onChange(e: React.ChangeEvent<HTMLInputElement>) {
const { files: rawFiles } = e.target
if (!rawFiles) return
let files = [].slice.call(rawFiles) as File[]
if (props.beforeUpload) {
files = await props.beforeUpload(files)
}
if (files.length === 0) {
return
}
if (maxCount > 0) {
const exceed = value.length + files.length - maxCount
if (exceed > 0) {
files = files.slice(0, maxCount - exceed)
props.onCountExceed?.(exceed)
}
}
const newTasks = files.map(
file =>
({
id: idCountRef.current++,
status: 'pending',
file,
} as Task)
)
setTasks(prev => [...prev, ...newTasks])
await Promise.all(
newTasks.map(async currentTask => {
try {
const result = await props.upload(currentTask.file)
setTasks(prev => {
return prev.map(task => {
if (task.id === currentTask.id) {
return {
...task,
url: result.url,
}
}
return task
})
})
updateValue(prev => [
...prev,
{
url: result.url,
},
])
} catch (e) {
setTasks(prev => {
return prev.map(task => {
if (task.id === currentTask.id) {
return {
...task,
status: 'fail',
}
}
return task
})
})
throw e
}
})
)
}
function previewImage(index: number) {
ImageViewer.Multi.show({
images: value.map(fileItem => fileItem.url),
defaultIndex: index,
})
onPreview && onPreview(index)
}
const showUpload =
props.showUpload &&
(maxCount === 0 || value.length + tasks.length < maxCount)
return (
<div className={classPrefix}>
<Space size={convertPx(12)} wrap>
{value.map((fileItem, index) => (
<PreviewItem
key={fileItem.url}
url={fileItem.url}
deletable={props.deletable}
onClick={() => previewImage(index)}
onDelete={() => {
setValue(value.filter(x => x.url !== fileItem.url))
}}
/>
))}
{tasks.map(task => (
<PreviewItem
key={task.id}
file={task.file}
deletable={task.status !== 'pending'}
status={task.status}
onDelete={() => {
setValue(value.filter(x => x.url !== task.url))
}}
/>
))}
{showUpload && (
<span
className={`${classPrefix}-cell ${classPrefix}-upload-button`}
role='button'
>
<span className={`${classPrefix}-upload-button-icon`}>
<AddOutline />
</span>
{!props.disableUpload && (
<input
capture={props.capture}
accept={props.accept}
multiple={props.multiple}
type='file'
className={`${classPrefix}-input`}
onChange={onChange}
/>
)}
</span>
)}
</Space>
</div>
)
}
args.ts | /**
* Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import type {Config} from '@jest/types';
import {constants, isJSONString} from 'jest-config';
import isCI = require('is-ci');
export function check(argv: Config.Argv): true {
if (argv.runInBand && argv.hasOwnProperty('maxWorkers')) {
throw new Error(
'Both --runInBand and --maxWorkers were specified, but these two ' +
'options do not make sense together. Which is it?',
);
}
for (const key of [
'onlyChanged',
'lastCommit',
'changedFilesWithAncestor',
'changedSince',
]) {
if (argv[key] && argv.watchAll) {
throw new Error(
`Both --${key} and --watchAll were specified, but these two ` +
'options do not make sense together. Try the --watch option which ' +
'reruns only tests related to changed files.',
);
}
}
if (argv.findRelatedTests && argv._.length === 0) {
throw new Error(
'The --findRelatedTests option requires file paths to be specified.\n' +
'Example usage: jest --findRelatedTests ./src/source.js ' +
'./src/index.js.',
);
}
if (argv.hasOwnProperty('maxWorkers') && argv.maxWorkers === undefined) {
throw new Error(
'The --maxWorkers (-w) option requires a number or string to be specified.\n' +
'Example usage: jest --maxWorkers 2\n' +
'Example usage: jest --maxWorkers 50%\n' +
'Or did you mean --watch?',
);
}
if (argv.selectProjects && argv.selectProjects.length === 0) {
throw new Error(
'The --selectProjects option requires the name of at least one project to be specified.\n' +
'Example usage: jest --selectProjects my-first-project my-second-project',
);
}
if (
argv.config &&
!isJSONString(argv.config) &&
!argv.config.match(
new RegExp(
`\\.(${constants.JEST_CONFIG_EXT_ORDER.map(e => e.substring(1)).join(
'|',
)})$`,
'i',
),
)
) {
throw new Error(
`The --config option requires a JSON string literal, or a file path with one of these extensions: ${constants.JEST_CONFIG_EXT_ORDER.join(
', ',
)}.\nExample usage: jest --config ./jest.config.js`,
);
}
return true;
}
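// Hedged usage note (assumption, not from this file): this validator is
// typically applied to parsed yargs output, e.g.
// `check(yargs(process.argv.slice(2)).argv as Config.Argv)`; conflicting
// flags such as `--runInBand --maxWorkers=2` make it throw immediately.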
export const usage =
'Usage: $0 [--config=<pathToConfigFile>] [TestPathPattern]';
export const docs = 'Documentation: https://jestjs.io/';
export const options = {
all: {
default: undefined,
description:
'The opposite of `onlyChanged`. If `onlyChanged` is set by ' +
'default, running jest with `--all` will force Jest to run all tests ' +
'instead of running only tests related to changed files.',
type: 'boolean',
},
automock: {
default: undefined,
description: 'Automock all files by default.',
type: 'boolean',
},
bail: {
alias: 'b',
default: undefined,
description:
'Exit the test suite immediately after `n` number of failing tests.',
type: 'boolean',
},
browser: {
default: undefined,
description:
'Respect the "browser" field in package.json ' +
'when resolving modules. Some packages export different versions ' +
'based on whether they are operating in node.js or a browser.',
type: 'boolean',
},
cache: {
default: undefined,
description:
'Whether to use the transform cache. Disable the cache ' +
'using --no-cache.',
type: 'boolean',
},
cacheDirectory: {
description:
'The directory where Jest should store its cached ' +
'dependency information.',
type: 'string',
},
changedFilesWithAncestor: {
default: undefined,
description:
'Runs tests related to the current changes and the changes made in the ' +
'last commit. Behaves similarly to `--onlyChanged`.',
type: 'boolean',
},
changedSince: {
description:
'Runs tests related to the changes since the provided branch. If the ' +
'current branch has diverged from the given branch, then only changes ' +
'made locally will be tested. Behaves similarly to `--onlyChanged`.',
nargs: 1,
type: 'string',
},
ci: {
default: isCI,
description:
'Whether to run Jest in continuous integration (CI) mode. ' +
'This option is on by default in most popular CI environments. It will ' +
'prevent snapshots from being written unless explicitly requested.',
type: 'boolean',
},
clearCache: {
default: undefined,
description:
'Clears the configured Jest cache directory and then exits. ' +
'Default directory can be found by calling jest --showConfig',
type: 'boolean',
},
clearMocks: {
default: undefined,
description:
'Automatically clear mock calls and instances between every ' +
'test. Equivalent to calling jest.clearAllMocks() between each test.',
type: 'boolean',
},
collectCoverage: {
default: undefined,
description: 'Alias for --coverage.',
type: 'boolean',
},
collectCoverageFrom: {
description:
'A glob pattern relative to <rootDir> matching the files that coverage ' +
'info needs to be collected from.',
type: 'string',
},
collectCoverageOnlyFrom: {
description: 'Explicit list of paths coverage will be restricted to.',
string: true,
type: 'array',
},
color: {
default: undefined,
description:
'Forces test results output color highlighting (even if ' +
'stdout is not a TTY). Set to false if you would like to have no colors.',
type: 'boolean',
},
colors: {
default: undefined,
description: 'Alias for `--color`.',
type: 'boolean',
},
config: {
alias: 'c',
description:
'The path to a jest config file specifying how to find ' +
'and execute tests. If no rootDir is set in the config, the directory ' +
'containing the config file is assumed to be the rootDir for the project. ' +
'This can also be a JSON encoded value which Jest will use as configuration.',
type: 'string',
},
coverage: {
default: undefined,
description:
'Indicates that test coverage information should be ' +
'collected and reported in the output.',
type: 'boolean',
},
coverageDirectory: {
description: 'The directory where Jest should output its coverage files.',
type: 'string',
},
coveragePathIgnorePatterns: {
description:
'An array of regexp pattern strings that are matched ' +
'against all file paths before executing the test. If the file path ' +
'matches any of the patterns, coverage information will be skipped.',
string: true,
type: 'array',
},
coverageProvider: {
choices: ['babel', 'v8'],
description: 'Select between Babel and V8 to collect coverage',
},
coverageReporters: {
description:
'A list of reporter names that Jest uses when writing ' +
'coverage reports. Any istanbul reporter can be used.',
string: true,
type: 'array',
},
coverageThreshold: {
description:
'A JSON string with which will be used to configure ' +
'minimum threshold enforcement for coverage results',
type: 'string',
},
debug: {
default: undefined,
description: 'Print debugging info about your jest config.',
type: 'boolean',
},
detectLeaks: {
default: false,
description:
'**EXPERIMENTAL**: Detect memory leaks in tests. After executing a ' +
'test, it will try to garbage collect the global object used, and fail ' +
'if it was leaked',
type: 'boolean',
},
detectOpenHandles: {
default: false,
description:
'Print out remaining open handles preventing Jest from exiting at the ' +
'end of a test run. Implies `runInBand`.',
type: 'boolean',
},
env: {
description:
'The test environment used for all tests. This can point to ' +
'any file or node module. Examples: `jsdom`, `node` or ' +
'`path/to/my-environment.js`',
type: 'string',
},
errorOnDeprecated: {
default: false,
description: 'Make calling deprecated APIs throw helpful error messages.',
type: 'boolean',
},
expand: {
alias: 'e',
default: undefined,
description: 'Use this flag to show full diffs instead of a patch.',
type: 'boolean',
},
filter: {
default: undefined,
description:
'Path to a module exporting a filtering function. This method receives ' +
'a list of tests which can be manipulated to exclude tests from ' +
'running. Especially useful when used in conjunction with a testing ' +
'infrastructure to filter known broken tests.',
type: 'string',
},
findRelatedTests: {
default: undefined,
description:
'Find related tests for a list of source files that were ' +
'passed in as arguments. Useful for pre-commit hook integration to run ' +
'the minimal amount of tests necessary.',
type: 'boolean',
},
forceExit: {
default: undefined,
description:
'Force Jest to exit after all tests have completed running. ' +
'This is useful when resources set up by test code cannot be ' +
'adequately cleaned up.',
type: 'boolean',
},
globalSetup: {
description: 'The path to a module that runs before All Tests.',
type: 'string',
},
globalTeardown: {
description: 'The path to a module that runs after All Tests.',
type: 'string',
},
globals: {
description:
'A JSON string with map of global variables that need ' +
'to be available in all test environments.',
type: 'string',
},
haste: {
description:
'A JSON string with map of variables for the haste module system',
type: 'string',
},
init: {
description: 'Generate a basic configuration file',
type: 'boolean',
},
json: {
default: undefined,
description:
'Prints the test results in JSON. This mode will send all ' +
'other test output and user messages to stderr.',
type: 'boolean',
},
lastCommit: {
default: undefined,
description:
'Run all tests affected by file changes in the last commit made. ' +
'Behaves similarly to `--onlyChanged`.',
type: 'boolean',
},
listTests: {
default: false,
description:
'Lists all tests Jest will run given the arguments and ' +
'exits. Most useful in a CI system together with `--findRelatedTests` ' +
'to determine the tests Jest will run based on specific files',
type: 'boolean',
},
logHeapUsage: {
default: undefined,
description:
'Logs the heap usage after every test. Useful to debug ' +
'memory leaks. Use together with `--runInBand` and `--expose-gc` in ' +
'node.',
type: 'boolean',
},
mapCoverage: {
default: undefined,
description:
'Maps code coverage reports against original source code ' +
'when transformers supply source maps.\n\nDEPRECATED',
type: 'boolean',
},
maxConcurrency: {
default: 5,
description:
'Specifies the maximum number of tests that are allowed to run ' +
'concurrently. This only affects tests using `test.concurrent`.',
type: 'number',
},
maxWorkers: {
alias: 'w',
description:
'Specifies the maximum number of workers the worker-pool ' +
'will spawn for running tests. This defaults to the number of the ' +
'cores available on your machine. (it\'s usually best not to override ' +
'this default)',
type: 'string',
},
moduleDirectories: {
description:
'An array of directory names to be searched recursively ' +
"up from the requiring module's location.",
string: true,
type: 'array',
},
moduleFileExtensions: {
description:
'An array of file extensions your modules use. If you ' +
'require modules without specifying a file extension, these are the ' +
'extensions Jest will look for. ',
string: true,
type: 'array',
},
moduleNameMapper: {
description:
'A JSON string with a map from regular expressions to ' +
'module names or to arrays of module names that allow stubbing ' +
'out resources, like images or styles with a single module',
type: 'string',
},
modulePathIgnorePatterns: {
description:
'An array of regexp pattern strings that are matched ' +
'against all module paths before those paths are to be considered ' +
'"visible" to the module loader.',
string: true,
type: 'array',
},
modulePaths: {
description:
'An alternative API to setting the NODE_PATH env variable, ' +
'modulePaths is an array of absolute paths to additional locations to ' +
'search when resolving modules.',
string: true,
type: 'array',
},
noStackTrace: {
default: undefined,
description: 'Disables stack trace in test results output',
type: 'boolean',
},
notify: {
default: undefined,
description: 'Activates notifications for test results.',
type: 'boolean',
},
notifyMode: {
default: 'failure-change',
description: 'Specifies when notifications will appear for test results.',
type: 'string',
},
onlyChanged: {
alias: 'o',
default: undefined,
description:
'Attempts to identify which tests to run based on which ' +
"files have changed in the current repository. Only works if you're " +
'running tests in a git or hg repository at the moment.',
type: 'boolean',
},
onlyFailures: {
alias: 'f',
default: undefined,
description: 'Run tests that failed in the previous execution.',
type: 'boolean',
},
outputFile: {
description:
'Write test results to a file when the --json option is ' +
'also specified.',
type: 'string',
},
passWithNoTests: {
default: false,
description:
'Will not fail if no tests are found (for example while using `--testPathPattern`.)',
type: 'boolean',
},
preset: {
description: "A preset that is used as a base for Jest's configuration.",
type: 'string',
},
prettierPath: {
default: undefined,
description: 'The path to the "prettier" module used for inline snapshots.',
type: 'string',
},
projects: {
description:
'A list of projects that use Jest to run all tests of all ' +
'projects in a single instance of Jest.',
string: true,
type: 'array',
},
reporters: {
description: 'A list of custom reporters for the test suite.',
string: true,
type: 'array',
},
resetMocks: {
default: undefined,
description:
'Automatically reset mock state between every test. ' +
'Equivalent to calling jest.resetAllMocks() between each test.',
type: 'boolean',
},
resetModules: {
default: undefined,
description:
'If enabled, the module registry for every test file will ' +
'be reset before running each individual test.',
type: 'boolean',
},
resolver: {
description: 'A JSON string which allows the use of a custom resolver.',
type: 'string',
},
restoreMocks: {
default: undefined,
description:
'Automatically restore mock state and implementation between every test. ' +
'Equivalent to calling jest.restoreAllMocks() between each test.',
type: 'boolean',
},
rootDir: {
description:
'The root directory that Jest should scan for tests and ' +
'modules within.',
type: 'string',
},
roots: {
description:
'A list of paths to directories that Jest should use to ' +
'search for files in.',
string: true,
type: 'array',
},
runInBand: {
alias: 'i',
default: undefined,
description:
'Run all tests serially in the current process (rather than ' +
'creating a worker pool of child processes that run tests). This ' +
'is sometimes useful for debugging, but such use cases are pretty ' +
'rare.',
type: 'boolean',
},
runTestsByPath: {
default: false,
description:
'Used when provided patterns are exact file paths. This avoids ' +
'converting them into a regular expression and matching it against ' +
'every single file.',
type: 'boolean',
},
runner: {
description:
"Allows to use a custom runner instead of Jest's default test runner.",
type: 'string',
},
selectProjects: {
description:
'Run only the tests of the specified projects. ' +
'Jest uses the attribute `displayName` in the configuration to identify each project.',
string: true,
type: 'array',
},
setupFiles: {
description:
'A list of paths to modules that run some code to configure or ' +
'set up the testing environment before each test. ',
string: true,
type: 'array',
},
setupFilesAfterEnv: {
description:
'A list of paths to modules that run some code to configure or ' +
'set up the testing framework before each test ',
string: true,
type: 'array',
},
showConfig: {
default: undefined,
description: 'Print your jest config and then exit.',
type: 'boolean',
},
silent: {
default: undefined,
description: 'Prevent tests from printing messages through the console.',
type: 'boolean',
},
skipFilter: {
default: undefined,
description:
'Disables the filter provided by --filter. Useful for CI jobs, or ' +
'local enforcement when fixing tests.',
type: 'boolean',
},
snapshotSerializers: {
description:
'A list of paths to snapshot serializer modules Jest should ' +
'use for snapshot testing.',
string: true,
type: 'array',
},
testEnvironment: {
description: 'Alias for --env',
type: 'string',
},
testEnvironmentOptions: {
description:
'Test environment options that will be passed to the testEnvironment. ' +
'The relevant options depend on the environment.',
type: 'string', // Object
},
testFailureExitCode: {
description: 'Exit code of `jest` command if the test run failed',
type: 'string', // number
},
testLocationInResults: {
default: false,
description: 'Add `location` information to the test results',
type: 'boolean',
},
testMatch: {
description: 'The glob patterns Jest uses to detect test files.',
string: true,
type: 'array',
},
testNamePattern: {
alias: 't',
description: 'Run only tests with a name that matches the regex pattern.',
type: 'string',
},
testPathIgnorePatterns: {
description:
'An array of regexp pattern strings that are matched ' +
'against all test paths before executing the test. If the test path ' +
'matches any of the patterns, it will be skipped.',
string: true,
type: 'array',
},
testPathPattern: {
description:
'A regexp pattern string that is matched against all test ' +
'paths before executing the test.',
string: true,
type: 'array',
},
testRegex: {
description:
'A string or array of string regexp patterns that Jest uses to detect test files.',
string: true,
type: 'array',
},
testResultsProcessor: {
description:
'Allows the use of a custom results processor. ' +
'This processor must be a node module that exports ' +
'a function expecting as the first argument the result object.',
type: 'string',
},
testRunner: {
description:
'Allows specifying a custom test runner. The default is ' +
'`jasmine2`. A path to a custom test runner can be provided: ' +
'`<rootDir>/path/to/testRunner.js`.',
type: 'string',
},
testSequencer: {
description:
'Allows specifying a custom test sequencer. The default is ' +
'`@jest/test-sequencer`. A path to a custom test sequencer can be ' +
'provided: `<rootDir>/path/to/testSequencer.js`',
type: 'string',
},
testTimeout: {
description: 'This option sets the default timeouts of test cases.',
type: 'number',
},
testURL: {
description: 'This option sets the URL for the jsdom environment.',
type: 'string',
},
timers: {
description:
'Setting this value to fake allows the use of fake timers ' +
'for functions such as setTimeout.',
type: 'string',
},
transform: {
description:
'A JSON string which maps from regular expressions to paths ' +
'to transformers.',
type: 'string',
},
transformIgnorePatterns: {
description:
'An array of regexp pattern strings that are matched ' +
'against all source file paths before transformation.',
string: true,
type: 'array',
},
unmockedModulePathPatterns: {
description:
'An array of regexp pattern strings that are matched ' +
'against all modules before the module loader will automatically ' +
'return a mock for them.',
string: true,
type: 'array',
},
updateSnapshot: {
alias: 'u',
default: undefined,
description:
'Use this flag to re-record snapshots. ' +
'Can be used together with a test suite pattern or with ' +
'`--testNamePattern` to re-record snapshot for test matching ' +
'the pattern',
type: 'boolean',
},
useStderr: {
default: undefined,
description: 'Divert all output to stderr.',
type: 'boolean',
},
verbose: {
default: undefined,
description:
'Display individual test results with the test suite hierarchy.',
type: 'boolean',
},
version: {
alias: 'v',
default: undefined,
description: 'Print the version and exit',
type: 'boolean',
},
watch: {
default: undefined,
description:
'Watch files for changes and rerun tests related to ' +
'changed files. If you want to re-run all tests when a file has ' +
'changed, use the `--watchAll` option.',
type: 'boolean',
},
watchAll: {
default: undefined,
description:
'Watch files for changes and rerun all tests. If you want ' +
'to re-run only the tests related to the changed files, use the ' +
'`--watch` option.',
type: 'boolean',
},
watchPathIgnorePatterns: {
description:
'An array of regexp pattern strings that are matched ' +
'against all paths before triggering a test re-run in watch mode. ' +
'If the test path matches any of the patterns, it will be skipped.',
string: true,
type: 'array',
},
watchman: {
default: undefined,
description:
'Whether to use watchman for file crawling. Disable using ' +
'--no-watchman.',
type: 'boolean',
},
} as const;
list_adapters.rs | #[tokio::main(flavor = "current_thread")]
async fn main() -> bluer::Result<()> {
let session = bluer::Session::new().await?;
let adapter_names = session.adapter_names().await?;
for adapter_name in adapter_names {
println!("Bluetooth adapter {}:", &adapter_name);
let adapter = session.adapter(&adapter_name)?;
println!(" Address: {}", adapter.address().await?);
println!(" Address type: {}", adapter.address_type().await?);
println!(" Friendly name: {}", adapter.alias().await?);
println!(" Modalias: {:?}", adapter.modalias().await?);
println!(" Powered: {:?}", adapter.is_powered().await?);
println!(" Discoverable: {:?}", adapter.is_discoverable().await?);
println!(" Pairable: {:?}", adapter.is_pairable().await?);
println!(" UUIDs: {:?}", adapter.uuids().await?);
println!();
println!(" Active adv. instances: {}", adapter.active_advertising_instances().await?);
println!(" Supp. adv. instances: {}", adapter.supported_advertising_instances().await?);
println!(" Supp. adv. includes: {:?}", adapter.supported_advertising_system_includes().await?);
println!(" Adv. capabilities: {:?}", adapter.supported_advertising_capabilities().await?);
println!(" Adv. features: {:?}", adapter.supported_advertising_features().await?);
println!();
}
Ok(())
}
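// Hedged usage note (assumption, not from the source): if this file lives in
// the crate's `examples/` directory, it can be run with
// `cargo run --example list_adapters`.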
abilities.ts | export const Abilities: {[k: string]: ModdedAbilityData} = {
infectiousbite: {
name: "Infectious Bite",
desc: "This is a long description",
shortDesc: "Infects Pokemon that attack it.",
onDamagingHit(damage, target, source, move) {
const sourceAbility = source.getAbility();
if (sourceAbility.isPermanent || sourceAbility.id === 'infectiousbite' || sourceAbility.id === 'infected') {
				return;
			}
			if (this.checkMoveMakesContact(move, source, target, !source.isAlly(target))) {
				const oldAbility = source.setAbility('infected', target);
				if (oldAbility) {
this.add('-activate', target, 'ability: Infectious Bite', this.dex.abilities.get(oldAbility).name, '[of] ' + source);
}
}
},
},
infected: {
name: "Infected",
desc: "",
shortDesc: "Infects Pokemon that attack it, and is itself affected.",
onDamagingHit(damage, target, source, move) {
const sourceAbility = source.getAbility();
if (sourceAbility.isPermanent || sourceAbility.id === 'infectiousbite') {
return;
}
if (this.checkMoveMakesContact(move, source, target, !source.isAlly(target))) {
const oldAbility = source.setAbility('mummy', target);
if (oldAbility) {
this.add('-activate', target, 'ability: Infected', this.dex.abilities.get(oldAbility).name, '[of] ' + source);
}
}
},
},
};
cluster.py | from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
import matplotlib.pyplot as plt
def plot_clustering(data):
'''
Definition:
This function plots the squared error for the clustered points
args:
data to be clustered
returns:
None
'''
    cost = []
    max_clusters = 20
    for i in range(2, max_clusters):
print("Analysing ", i, " clusters")
KM = MiniBatchKMeans(n_clusters=i, batch_size=20000)
KM.fit(data)
cost.append(KM.inertia_)
plt.plot(range(2, max_clusters), cost, color='g', linewidth=3)
plt.xlabel("Number of Clusters")
plt.ylabel("Squared Error (Cost)")
plt.show()
def do_clustering(data,number_clusters):
'''
Definition:
This function initializes KMeans with number_clusters and fits it to data
args:
data to be clustered, number_clusters
returns:
fitted K-Means model
'''
kmeans = KMeans(number_clusters)
fitted_model_k_means = kmeans.fit(data)
    return fitted_model_k_means
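# Hedged usage sketch with synthetic data (not part of the original module):
if __name__ == "__main__":
    import numpy as np
    sample = np.random.rand(1000, 5)
    plot_clustering(sample)              # inspect the elbow to choose k
    model = do_clustering(sample, 5)     # k = 5 is an assumed choice
    print(model.cluster_centers_.shape)  # (5, 5)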
error.rs | use actix_web::{http::StatusCode, BaseHttpResponse, ResponseError, body::Body};
use derive_more::{Display, Error};
/// Errors that can occur when processing CORS guarded requests.
#[derive(Debug, Clone, Display, Error)]
#[non_exhaustive]
pub enum CorsError {
/// Allowed origin argument must not be wildcard (`*`).
#[display(fmt = "`allowed_origin` argument must not be wildcard (`*`).")]
WildcardOrigin,
/// Request header `Origin` is required but was not provided.
#[display(fmt = "Request header `Origin` is required but was not provided.")]
MissingOrigin,
/// Request header `Access-Control-Request-Method` is required but is missing.
#[display(
fmt = "Request header `Access-Control-Request-Method` is required but is missing."
)]
MissingRequestMethod,
/// Request header `Access-Control-Request-Method` has an invalid value.
#[display(
fmt = "Request header `Access-Control-Request-Method` has an invalid value."
)]
BadRequestMethod,
/// Request header `Access-Control-Request-Headers` has an invalid value.
#[display(
fmt = "Request header `Access-Control-Request-Headers` has an invalid value."
)]
BadRequestHeaders,
/// Origin is not allowed to make this request.
#[display(fmt = "Origin is not allowed to make this request.")]
OriginNotAllowed,
/// Request method is not allowed.
#[display(fmt = "Requested method is not allowed.")]
MethodNotAllowed,
/// One or more request headers are not allowed.
#[display(fmt = "One or more request headers are not allowed.")]
HeadersNotAllowed,
}
impl ResponseError for CorsError {
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
fn error_response(&self) -> BaseHttpResponse<Body> {
BaseHttpResponse::new(self.status_code()).set_body(Body::from(self.to_string()))
}
}
keyword-static-as-identifier.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z parse-only
// This file was auto-generated using 'src/etc/generate-keyword-tests.py static'
fn main() {
let static = "foo"; //~ error: expected pattern, found keyword `static`
}
run_models.py | import logging
from src.prep_data import main as prep_data
from src.run_sims import run_aggressive_sim, run_conservative_sim
from src.regression import make_and_run_model as run_model
from src.coupled import make_and_run_model as run_coupled
__author__ = 'Rusty Gentile'
logger = logging.getLogger(__name__)
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
logger.info('Prepping data...')
prep_data()
logger.info('Starting simulations...')
run_conservative_sim()
run_aggressive_sim()
logger.info('Starting regression models...')
run_model(2019)
run_model(2020)
run_coupled(2019, './data/results/results_aggr_sim_1.csv', 2023)
run_coupled(2020, './data/results/results_aggr_sim_1.csv', 2023)
stargzify.go | // Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The stargzify command converts a remote container image into an equivalent
// image with its layers transformed into stargz files instead of gzipped tar
// files. The image is still a valid container image, but its layers contain
// multiple gzip streams instead of one and have a Table of Contents at the end.
package main
import (
"crypto/sha256"
"encoding/hex"
"flag"
"fmt"
"hash"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"github.com/google/crfs/stargz"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/stream"
"github.com/google/go-containerregistry/pkg/v1/types"
)
var (
upgrade = flag.Bool("upgrade", false, "upgrade the image in-place by overwriting the tag")
flatten = flag.Bool("flatten", false, "flatten the image's layers into a single layer")
usage = `usage: %[1]s [-upgrade] [-flatten] input [output]
Converting images:
# converts "ubuntu" from dockerhub and uploads to your GCR project
%[1]s ubuntu gcr.io/<your-project>/ubuntu:stargz
# converts and overwrites :latest
%[1]s -upgrade gcr.io/<your-project>/ubuntu:latest
# converts and flattens "ubuntu"
%[1]s -flatten ubuntu gcr.io/<your-project>/ubuntu:flattened
Converting files:
%[1]s file:/tmp/input.tar.gz file:output.stargz
# writes to /tmp/input.stargz
%[1]s file:/tmp/input.tar.gz
`
)
func main() {
flag.Parse()
if len(flag.Args()) < 1 {
printUsage()
}
if strings.HasPrefix(flag.Args()[0], "file:") {
// We'll use "file:" prefix as a signal to convert single files.
convertFile()
} else {
convertImage()
}
}
func printUsage() {
log.Fatalf(usage, os.Args[0])
}
func convertFile() {
var in, out string
if len(flag.Args()) > 0 {
in = strings.TrimPrefix(flag.Args()[0], "file:")
}
if len(flag.Args()) > 1 {
out = strings.TrimPrefix(flag.Args()[1], "file:")
}
var f, fo *os.File // file in, file out
var err error
switch in {
case "":
printUsage()
case "-":
f = os.Stdin
default:
f, err = os.Open(in)
if err != nil {
log.Fatal(err)
}
}
defer f.Close()
if out == "" {
if in == "-" {
out = "-"
} else {
base := strings.TrimSuffix(in, ".gz")
base = strings.TrimSuffix(base, ".tgz")
base = strings.TrimSuffix(base, ".tar")
out = base + ".stargz"
}
}
if out == "-" {
fo = os.Stdout
} else {
fo, err = os.Create(out)
if err != nil {
log.Fatal(err)
}
}
w := stargz.NewWriter(fo)
if err := w.AppendTar(f); err != nil {
log.Fatal(err)
}
if err := w.Close(); err != nil {
log.Fatal(err)
}
if err := fo.Close(); err != nil {
log.Fatal(err)
}
}
func parseFlags(args []string) (string, string) {
if len(args) < 1 {
printUsage()
}
var src, dst string
src = args[0]
if len(args) < 2 {
if *upgrade {
dst = src
} else {
printUsage()
}
} else if len(args) == 2 {
if *upgrade {
log.Println("expected one argument with -upgrade")
printUsage()
} else {
dst = args[1]
}
} else {
log.Println("too many arguments")
printUsage()
}
return src, dst
}
func convertImage() {
src, dst := parseFlags(flag.Args())
srcRef, err := name.ParseReference(src)
if err != nil {
log.Fatal(err)
}
// Pull source image.
srcImg, err := remote.Image(srcRef, remote.WithAuthFromKeychain(authn.DefaultKeychain))
if err != nil {
log.Fatal(err)
}
// Grab original config, clear the layer info from the config file. We want to
// preserve the relevant config.
srcCfg, err := srcImg.ConfigFile()
if err != nil {
log.Fatal(err)
}
srcCfg.RootFS.DiffIDs = []v1.Hash{}
srcCfg.History = []v1.History{}
// Use an empty image with the rest of src's config file as a base.
img, err := mutate.ConfigFile(empty.Image, srcCfg)
if err != nil {
log.Fatal(err)
}
layers, err := convertLayers(srcImg)
if err != nil {
log.Fatal(err)
}
for _, layer := range layers {
img, err = mutate.Append(img, mutate.Addendum{
Layer: layer,
History: v1.History{
// Leave our mark.
CreatedBy: fmt.Sprintf("stargzify %s %s", src, dst),
},
})
if err != nil {
log.Fatal(err)
}
}
// Push the stargzified image to dst.
dstRef, err := name.ParseReference(dst)
if err != nil {
log.Fatal(err)
}
dstAuth, err := authn.DefaultKeychain.Resolve(dstRef.Context().Registry)
if err != nil {
log.Fatal(err)
}
if err := remote.Write(dstRef, img, dstAuth, http.DefaultTransport); err != nil {
log.Fatal(err)
}
}
func convertLayers(img v1.Image) ([]v1.Layer, error) {
if *flatten {
r := mutate.Extract(img)
return []v1.Layer{newLayer(r)}, nil
}
layers, err := img.Layers()
if err != nil {
return nil, err
}
converted := []v1.Layer{}
for _, layer := range layers {
r, err := layer.Uncompressed()
if err != nil {
return nil, err
}
converted = append(converted, newLayer(r))
}
return converted, nil
}
type layer struct {
rc io.ReadCloser
d *digester
diff *v1.Hash
digest *v1.Hash
}
// newLayer converts the given io.ReadCloser to a stargz layer.
func newLayer(rc io.ReadCloser) v1.Layer {
return &layer{
rc: rc,
d: &digester{
h: sha256.New(),
},
}
}
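// Hedged note (not in the original comments): the accessors below return
// stream.ErrNotComputed until Compressed() has been fully consumed, because
// the digest, diff ID, and size are computed on the fly while the blob is
// converted to stargz through a pipe.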
func (l *layer) Digest() (v1.Hash, error) {
if l.digest == nil {
return v1.Hash{}, stream.ErrNotComputed
}
return *l.digest, nil
}
func (l *layer) Size() (int64, error) {
if l.digest == nil {
return -1, stream.ErrNotComputed
}
return l.d.n, nil
}
func (l *layer) DiffID() (v1.Hash, error) {
if l.diff == nil {
	return v1.Hash{}, stream.ErrNotComputed
}
return *l.diff, nil
}
func (l *layer) MediaType() (types.MediaType, error) {
// TODO: We might want to set our own media type to indicate stargz layers,
// but that has the potential to break registry compatibility.
return types.DockerLayer, nil
}
func (l *layer) Compressed() (io.ReadCloser, error) {
pr, pw := io.Pipe()
// Convert input blob to stargz while computing diffid, digest, and size.
go func() {
w := stargz.NewWriter(pw)
if err := w.AppendTar(l.rc); err != nil {
pw.CloseWithError(err)
return
}
if err := w.Close(); err != nil {
pw.CloseWithError(err)
return
}
diffid, err := v1.NewHash(w.DiffID())
if err != nil {
pw.CloseWithError(err)
return
}
l.diff = &diffid
l.digest = &v1.Hash{
Algorithm: "sha256",
Hex: hex.EncodeToString(l.d.h.Sum(nil)),
}
pw.Close()
}()
return ioutil.NopCloser(io.TeeReader(pr, l.d)), nil
}
func (l *layer) Uncompressed() (io.ReadCloser, error) {
return l.rc, nil
}
// digester tracks the sha256 and length of what is written to it.
type digester struct {
h hash.Hash
n int64
}
func (d *digester) Write(b []byte) (int, error) {
n, err := d.h.Write(b)
d.n += int64(n)
return n, err
}
MulAdd.py | import collections
from supriya import CalculationRate
from supriya.synthdefs import UGen
class MulAdd(UGen):
"""
An optimized multiplication / addition ugen.
::
>>> source = supriya.ugens.SinOsc.ar()
>>> mul_add = supriya.ugens.MulAdd.new(
... addend=0.5,
... multiplier=-1.5,
... source=source,
... )
>>> mul_add
MulAdd.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Basic Operator UGens"
_ordered_input_names = collections.OrderedDict(
[("source", None), ("multiplier", 1.0), ("addend", 0.0)]
)
### INITIALIZER ###
def __init__(self, addend=0.0, multiplier=1.0, calculation_rate=None, source=None):
UGen.__init__(
self,
addend=addend,
multiplier=multiplier,
calculation_rate=calculation_rate,
source=source,
)
### PRIVATE METHODS ###
@staticmethod
def _inputs_are_valid(source, multiplier, addend):
if CalculationRate.from_expr(source) == CalculationRate.AUDIO:
return True
if CalculationRate.from_expr(source) == CalculationRate.CONTROL:
if CalculationRate.from_expr(multiplier) in (
CalculationRate.CONTROL,
CalculationRate.SCALAR,
):
if CalculationRate.from_expr(addend) in (
CalculationRate.CONTROL,
CalculationRate.SCALAR,
):
return True
return False
@classmethod
def _new_single(
cls, addend=None, multiplier=None, calculation_rate=None, source=None
):
if multiplier == 0.0:
    return addend  # 0 * source + addend reduces to addend
minus = multiplier == -1
no_multiplier = multiplier == 1
no_addend = addend == 0
if no_multiplier and no_addend:
    return source  # 1 * source + 0 reduces to source
if minus and no_addend:
    return -source  # -1 * source + 0 reduces to -source
if no_addend:
    return source * multiplier
if minus:
    return addend - source
if no_multiplier:
    return source + addend
if cls._inputs_are_valid(source, multiplier, addend):
return cls(
addend=addend,
multiplier=multiplier,
calculation_rate=calculation_rate,
source=source,
)
if cls._inputs_are_valid(multiplier, source, addend):
return cls(
addend=addend,
multiplier=source,
calculation_rate=calculation_rate,
source=multiplier,
)
return (source * multiplier) + addend
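# Illustrative example (not part of the original source): with multiplier=1
# and addend=0, _new_single returns the source unchanged, so no redundant
# MulAdd node is added to the UGen graph.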
### PUBLIC METHODS ###
@classmethod
def new(cls, source=None, multiplier=1.0, addend=0.0):
"""
Constructs a multiplication / addition ugen.
::
>>> addend = 0.5
>>> multiplier = 1.5
>>> source = supriya.ugens.SinOsc.ar(frequency=[440, 442])
>>> mul_add = supriya.ugens.MulAdd.new(
... addend=addend,
... multiplier=multiplier,
... source=source,
... )
>>> mul_add
UGenArray({2})
Returns ugen graph.
"""
import supriya.synthdefs
# TODO: handle case of array as source
calculation_rate = supriya.CalculationRate.from_expr(
(source, multiplier, addend)
)
ugen = cls._new_expanded(
addend=addend,
multiplier=multiplier,
calculation_rate=calculation_rate,
source=source,
)
return ugen
test_deployment.py | from unittest import TestCase
from pyhocon import ConfigTree
from mist.models import Deployment
class DeploymentTest(TestCase):
def test_create_deployment(self):
Deployment('test', 'Artifact', ConfigTree(), '0.0.1')
def test_get_name(self):
d = Deployment('test', 'Artifact', ConfigTree({
'file-path': 'test-name.py'
}), '0.0.1')
self.assertEqual(d.get_name(), 'test_0.0.1.py')
def test_with_user_name(self):
d = Deployment('test', 'Function', ConfigTree({
'context': 'foo',
'path': 'test-name.jar'
}), '0.0.1')
d.with_user('test_name')
self.assertEqual(d.name, 'test_name_test')
self.assertEqual(d.data['path'], 'test_name_test-name.jar')
self.assertEqual(d.data['context'], 'test_name_foo')
classification.py | import sys, os, shutil
import h5py
import time
import io
import random
import tempfile
from tqdm import tqdm
from absl import app, flags, logging
from ray.util.multiprocessing import Pool
import gcsfs
import numpy as np
from pathlib import Path
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
import torchtext
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn as nn
from transformers import BertTokenizer, BertModel, BertForSequenceClassification
import opacus
from privatekube.experiments.datasets import (
EventLevelDataset,
split_review_batch,
UserTimeLevelDataset,
select_blocks_by_timeframe,
)
from privatekube.experiments.utils import (
build_flags,
flags_to_dict,
load_yaml,
results_to_dict,
save_yaml,
save_model,
binary_accuracy,
multiclass_accuracy,
epoch_time,
)
from privatekube.privacy.text import build_public_vocab
from privatekube.privacy.rdp import (
compute_noise_from_target_epsilon,
ALPHAS,
compute_rdp_sgm,
)
import models
DEFAULT_DATA_PATH = Path(__file__).resolve().parent.parent.parent.joinpath("data")
# Define default args
dataset_args = {
"n_blocks": 200,
"max_text_len": 140,
"vocab_size": 10_000,
"n_blocks_test": 200,
}
input_path_args = {
"dataset_dir": "",
"dataset_monofile": "",
"block_counts": str(DEFAULT_DATA_PATH.joinpath("block_counts.yaml")),
"emb_path": str(DEFAULT_DATA_PATH.joinpath(".vector_cache")),
}
model_args = {
"task": "product",
"model": "bow",
"embedding_dim": 100,
"hidden_dim_1": 240,
"hidden_dim_2": 195,
"hidden_dim": 100,
"dropout": 0.25,
}
training_args = {
"device": "cuda",
"learning_rate": 0.01,
"dp": 0,
"dp_eval": 0,
"user_level": 0,
"epsilon": 5.0,
"delta": 1e-5,
"n_epochs": 15,
"batch_size": 64,
"virtual_batch_multiplier": 2,
"adaptive_batch_size": 1,
"noise": -1.0,
"timeframe_days": 0,
"learning_rate_scheduler": 1,
"dynamic_clipping": 0,
"max_grad_norm": 1.0,
"per_layer_clipping": 0,
"n_workers": 6,
"non_dp_batch_size": 256,
}
output_args = {
"log_path": "",
"model_path": "",
"metrics_path": "",
}
build_flags(dataset_args, model_args, training_args, input_path_args, output_args)
FLAGS = flags.FLAGS
np.random.seed(0)
def build_split_dataset():
block_dir = tempfile.mkdtemp()
test_block_dir = tempfile.mkdtemp()
if FLAGS.dataset_dir[0:5] == "gs://":
os.system(
"gcloud auth activate-service-account --key-file=$GOOGLE_APPLICATION_CREDENTIALS"
)
fs = gcsfs.GCSFileSystem(
project=os.getenv("GCP_PROJECT"), token="google_default"
) # Get the local Gcloud token
logging.info("Listing bucket files.")
all_blocks = list(
map(
lambda blob: os.path.basename(blob["name"]),
fs.listdir(FLAGS.dataset_dir),
)
)
logging.info(f"Got {len(all_blocks)} blocks.")
logging.warning(f"The evaluation set is not fixed.")
elif FLAGS.dataset_dir == "":
logging.info("Listing the block names.")
all_blocks = list(load_yaml(FLAGS.block_counts).keys())
else:
all_blocks = os.listdir(FLAGS.dataset_dir)
logging.info(f"Selecting {FLAGS.n_blocks_test} test blocks (fixed randomness).")
test_blocks = np.random.choice(all_blocks, FLAGS.n_blocks_test, replace=False)
for tb in test_blocks:
all_blocks.remove(tb)
# Use every user to the maximum.
def sort_by_user(block_name):
if block_name.endswith(".h5"):
block_name = block_name[: -len(".h5")]
name = block_name.split("-")
user_slice = int(name[1])
return user_slice
logging.info(
f"Selecting as few users as possible.\n Pseudorandom and deterministic (hashed user ids)."
)
selected_blocks = sorted(all_blocks, key=sort_by_user)[0 : FLAGS.n_blocks]
if FLAGS.dataset_dir[0:5] == "gs://":
pool = Pool()
bucket_path = FLAGS.dataset_dir
def download_datasource(block_name):
|
logging.warning("Downloading the blocks in parallel.")
b = pool.map(download_datasource, selected_blocks)
pool.close()
pool.join()
block_names = None
test_block_names = None
elif FLAGS.dataset_dir == "":
block_dir = None
test_block_dir = None
block_names = selected_blocks
test_block_names = test_blocks
else:
for b in selected_blocks:
os.symlink(os.path.join(FLAGS.dataset_dir, b), os.path.join(block_dir, b))
for b in test_blocks:
os.symlink(
os.path.join(FLAGS.dataset_dir, b), os.path.join(test_block_dir, b)
)
block_names = None
test_block_names = None
# Store for the logs
FLAGS.dataset_dir = block_dir
if not FLAGS.dataset_monofile:
if FLAGS.model == "bert":
from_h5 = DEFAULT_DATA_PATH.joinpath("reviews.h5")
else:
from_h5 = DEFAULT_DATA_PATH.joinpath("reviews_custom_vocab.h5")
else:
from_h5 = FLAGS.dataset_monofile
if FLAGS.dp and FLAGS.user_level:
train_data = UserTimeLevelDataset(
blocks_dir=block_dir,
timeframe=FLAGS.timeframe_days * 86400,
from_h5=from_h5,
block_names=block_names,
)
else:
train_data = EventLevelDataset(
blocks_dir=block_dir,
from_h5=from_h5,
block_names=block_names,
)
test_data = EventLevelDataset(
blocks_dir=test_block_dir,
from_h5=from_h5,
block_names=test_block_names,
)
test_data, valid_data = test_data.split([0.75, 0.25])
logging.info(f"Test size: {len(test_data)}\n Valid size: {len(valid_data)}")
# Values from the preprocessing
# (max text len doesn't matter here)
text_field = torchtext.data.Field(
batch_first=True,
use_vocab=True,
init_token="<bos>",
eos_token="<eos>",
pad_token="<pad>",
unk_token="<unk>",
include_lengths=True,
)
build_public_vocab(
text_field,
max_size=FLAGS.vocab_size - 4,
vectors=f"glove.6B.{FLAGS.embedding_dim}d",
unk_init=torch.Tensor.normal_,
vectors_cache=FLAGS.emb_path,
)
return train_data, test_data, valid_data, text_field
def compute_optimal_batch_size(real_batch_size, dataset_len):
logging.info(
f"Computing the optimal batch size. Dataset {dataset_len}, real batch {real_batch_size}"
)
# Slightly under-approximates the optimal (square-root) batch size
optimal_batch_size = int(np.sqrt(dataset_len))
if optimal_batch_size <= real_batch_size:
return optimal_batch_size, 0
else:
return (real_batch_size, optimal_batch_size // real_batch_size)
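# Illustrative sketch (not in the original): for a dataset of 1,000,000
# samples and real_batch_size=64, sqrt(1e6) = 1000 > 64, so this returns
# (64, 15): physical batches of 64 accumulated over 15 virtual steps.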
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def build_model(text_field):
INPUT_DIM = len(text_field.vocab)
word_embeddings = text_field.vocab.vectors
PAD_IDX = text_field.vocab.stoi[text_field.pad_token]
UNK_IDX = text_field.vocab.stoi[text_field.unk_token]
if FLAGS.task == "sentiment":
output_dim = 1
elif FLAGS.task == "product":
output_dim = 11
if FLAGS.model == "lstm":
model = models.LSTMClassifier(
batch_size=FLAGS.batch_size,
output_size=output_dim,
hidden_size=FLAGS.hidden_dim,
vocab_size=INPUT_DIM,
embedding_length=FLAGS.embedding_dim,
weights=word_embeddings,
dropout=FLAGS.dropout,
dp=FLAGS.dp,
)
elif FLAGS.model == "bow":
model = models.NBOW(
input_dim=word_embeddings.shape[0],
emb_dim=FLAGS.embedding_dim,
output_dim=output_dim,
pad_idx=PAD_IDX,
word_embeddings=word_embeddings,
)
elif FLAGS.model == "feedforward":
model = models.FeedforwardModel(
vocab_size=INPUT_DIM,
embedding_dim=FLAGS.embedding_dim,
pad_idx=PAD_IDX,
H_1=FLAGS.hidden_dim_1,
H_2=FLAGS.hidden_dim_2,
D_out=output_dim,
word_embeddings=word_embeddings,
)
elif FLAGS.model == "bert":
# The dataset has been preprocessed with the bert tokenizer, so the indices should be correct
logging.info(f"Pad and unk index {PAD_IDX, UNK_IDX}")
model = models.FineTunedBert.build_new(output_dim=output_dim)
logging.info(
f"Model {FLAGS.model} has {count_parameters(model)} trainable parameters."
)
# Bert has its own pretrained embeddings
return model
pretrained_embeddings = text_field.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
model.embedding.weight.data[UNK_IDX] = torch.zeros(FLAGS.embedding_dim)
model.embedding.weight.data[PAD_IDX] = torch.zeros(FLAGS.embedding_dim)
logging.info(
f"Model {FLAGS.model} has {count_parameters(model)} trainable parameters."
)
return model
def train(model, iterator, optimizer, criterion, accuracy_fn):
epoch_loss = 0
epoch_acc = 0
model.train()
optimizer.zero_grad()
for i, batch in enumerate(tqdm(iterator)):
# batch = batch.to(FLAGS.device)
if FLAGS.task == "sentiment":
data, label = split_review_batch(
batch,
label_feature="binary_rating",
max_text_len=FLAGS.max_text_len,
include_len=True,
vocab_size=FLAGS.vocab_size,
custom_vocab=(FLAGS.model != "bert"),
)
text_lengths, text = data
elif FLAGS.task == "product":
text, label = split_review_batch(
batch,
label_feature="category",
max_text_len=FLAGS.max_text_len,
vocab_size=FLAGS.vocab_size,
custom_vocab=(FLAGS.model != "bert"),
)
text = text.to(device=FLAGS.device, dtype=torch.long)
label = (
label.to(device=FLAGS.device, dtype=torch.long)
if FLAGS.task == "product"
else label.to(device=FLAGS.device, dtype=torch.float)
)
if FLAGS.model == "lstm":
hidden = model.init_hidden(batch_size=len(batch))
if isinstance(hidden, tuple):
hidden = (
hidden[0].to(FLAGS.device),
hidden[1].to(FLAGS.device),
)
else:
hidden = hidden.to(FLAGS.device)
outputs = model(text, hidden)
elif FLAGS.model == "bert":
PAD_IDX = 0
inputs = {
"input_ids": text,
"labels": label,
"attention_mask": torch.where(
text == PAD_IDX, torch.zeros_like(text), torch.ones_like(text)
),
}
# logging.info(f"Inputs {inputs}")
# The model outputs loss, logits
outputs = model(**inputs)[1]
# logging.info(f"Outputs {outputs}")
else:
outputs = model(text)
# logging.info(f"Outputs {outputs}")
if FLAGS.task == "sentiment":
outputs = outputs.squeeze(1)
loss = criterion(outputs, label)
acc = accuracy_fn(outputs.detach(), label)
loss.backward()
if FLAGS.dp and FLAGS.virtual_batch_multiplier > 1:
# NOTE: step is not called at every minibatch, so the RDP accountant need to know this
if (i + 1) % FLAGS.virtual_batch_multiplier == 0 or (i + 1) == len(
iterator
):
# For the (virtual_batch_multiplier)th batch, call a clip-noise-step
optimizer.step()
optimizer.zero_grad()
else:
# For the first (virtual_batch_multiplier - 1) batches, just accumulate the gradients
optimizer.virtual_step()
else:
# Regular optimizer step (either non-DP or DP with no virtual step)
optimizer.step()
optimizer.zero_grad()
epoch_loss += loss.item()
# epoch_loss += loss.detach().item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate(model, iterator, criterion, accuracy_fn):
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for batch in iterator:
# batch = batch.to(FLAGS.device)
if FLAGS.task == "sentiment":
data, label = split_review_batch(
batch,
label_feature="binary_rating",
max_text_len=FLAGS.max_text_len,
include_len=True,
vocab_size=FLAGS.vocab_size,
custom_vocab=(FLAGS.model != "bert"),
)
text_lengths, text = data
elif FLAGS.task == "product":
text, label = split_review_batch(
batch,
label_feature="category",
max_text_len=FLAGS.max_text_len,
vocab_size=FLAGS.vocab_size,
custom_vocab=(FLAGS.model != "bert"),
)
text = text.to(device=FLAGS.device, dtype=torch.long)
label = (
label.to(device=FLAGS.device, dtype=torch.long)
if FLAGS.task == "product"
else label.to(device=FLAGS.device, dtype=torch.float)
)
if FLAGS.model == "lstm":
hidden = model.init_hidden(batch_size=len(batch))
if isinstance(hidden, tuple):
hidden = (
hidden[0].to(FLAGS.device),
hidden[1].to(FLAGS.device),
)
else:
hidden = hidden.to(FLAGS.device)
outputs = model(text, hidden)
elif FLAGS.model == "bert":
PAD_IDX = 0
inputs = {
"input_ids": text,
"labels": label,
"attention_mask": torch.where(
text == PAD_IDX, torch.zeros_like(text), torch.ones_like(text)
),
}
outputs = model(**inputs)[1]
else:
outputs = model(text)
if FLAGS.task == "sentiment":
outputs = outputs.squeeze(1)
# print(f"Training. Outputs: {outputs}, labels: {batch.label}")
loss = criterion(outputs, label)
acc = accuracy_fn(outputs, label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def train_validate(
train_data, valid_data, model, optimizer, criterion, accuracy_fn, scheduler
):
validation_accuracy_epochs = []
validation_loss_epochs = []
training_loss_epochs = []
training_accuracy_epochs = []
logging.info(f"n workers: {FLAGS.n_workers}")
train_iterator = torch.utils.data.DataLoader(
train_data,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.n_workers,
drop_last=True,
)
valid_iterator = torch.utils.data.DataLoader(
valid_data,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.n_workers,
drop_last=False,
)
criterion = criterion.to(FLAGS.device)
best_valid_loss = float("inf")
for epoch in range(FLAGS.n_epochs):
start_time = time.time()
logging.info(f"Starting epoch {epoch + 1}.")
train_loss, train_acc = train(
model, train_iterator, optimizer, criterion, accuracy_fn
)
valid_loss, valid_acc = evaluate(model, valid_iterator, criterion, accuracy_fn)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), "tut2-model.pt")
logging.info(f"Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s")
logging.info(
f"\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%"
)
scheduler.step(train_loss)
logging.info(
f"\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%"
)
validation_accuracy_epochs.append(valid_acc)
validation_loss_epochs.append(valid_loss)
training_loss_epochs.append(train_loss)
training_accuracy_epochs.append(train_acc)
return (
training_loss_epochs,
training_accuracy_epochs,
validation_loss_epochs,
validation_accuracy_epochs,
)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def main(argv):
start_time = time.time()
# Convert flags for the epsilon = -1 shortcut
if FLAGS.dp and FLAGS.epsilon < 0 and FLAGS.noise < 0:
FLAGS.dp = False
# No multiprocessing for large datasets (save RAM)
if FLAGS.n_blocks > 50_000:
logging.info(f"Large dataset, we use a single thread for the loader.")
FLAGS.n_workers = 0
# Build the dataset, either event level or user level
train_data, test_data, valid_data, text_field = build_split_dataset()
logging.info(
f"Number of samples for training: {len(train_data)}, validation: {len(valid_data)} and testing: {len(test_data)}"
)
# Adapt the batch size and the virtual step size, unless it has been specified manually
if FLAGS.dp and FLAGS.adaptive_batch_size and FLAGS.virtual_batch_multiplier <= 0:
FLAGS.batch_size, FLAGS.virtual_batch_multiplier = compute_optimal_batch_size(
FLAGS.batch_size, len(train_data)
)
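# NOTE (assumption): compute_optimal_batch_size presumably splits the requested
# batch into a physical batch that fits in memory plus a multiplier that
# restores the logical batch size through gradient accumulation.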
logging.info(
f"Using real batch {FLAGS.batch_size} with multiplier {FLAGS.virtual_batch_multiplier}"
)
if not FLAGS.dp:
FLAGS.batch_size = FLAGS.non_dp_batch_size
# Prepare the model and optimizer
model = build_model(text_field).to(FLAGS.device)
logging.info(f"Number of trainable parameters: {count_parameters(model)}")
# optimizer = optim.Adam(model.parameters())
optimizer = optim.AdamW(model.parameters(), lr=FLAGS.learning_rate, eps=1e-8)
scheduler = ReduceLROnPlateau(optimizer, mode="min", patience=3)
# train_it = torch.utils.data.DataLoader(
# train_data,
# batch_size=2048,
# shuffle=False,
# num_workers=FLAGS.n_workers,
# drop_last=False,
# )
# counts = {}
# for i in range(11):
# counts[i] = 0
# for b in train_it:
# for cat in b[:, 3]:
# counts[int(cat)] += 1
# s = sum(counts.values())
# for cat, count in counts.items():
# counts[cat] = count / s
# logging.info(counts)
if FLAGS.task == "sentiment":
criterion = nn.BCEWithLogitsLoss().to(FLAGS.device)
accuracy_fn = binary_accuracy
# automotive: 0.03036145803296712
# books: 0.41258122723567553
# cds: 0.012897189083383703
# clothing: 0.2025265712144095
# games: 0.031613111956201506
# groceries: 0.01949595483554337
# home: 0.119920985593197
# movies: 0.0484712255807162
# pets: 0.03665525816121956
# sports: 0.04961580907019007
# tools: 0.035861209236496445
elif FLAGS.task == "product":
# criterion = nn.CrossEntropyLoss(
# weight=torch.Tensor(
# [0.05, 0.035, 0.03, 0.035, 0.05, 0.02, 0.12, 0.01, 0.03, 0.20, 0.41]
# )
# )
criterion = nn.CrossEntropyLoss()
accuracy_fn = multiclass_accuracy
# Plug Opacus if DP training is activated
if FLAGS.dp:
if FLAGS.noise >= 0:
logging.info(f"User-provided noise: {FLAGS.noise}.")
else:
logging.info("Computing noise for the given parameters.")
FLAGS.noise = compute_noise_from_target_epsilon(
target_epsilon=FLAGS.epsilon,
target_delta=FLAGS.delta,
epochs=FLAGS.n_epochs,
batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier
if FLAGS.virtual_batch_multiplier > 0
else FLAGS.batch_size,
dataset_size=len(train_data),
alphas=ALPHAS,
)
logging.info(f"Noise computed from RDP budget: {FLAGS.noise}.")
# NOTE: when user-level DP is activated, the training dataset __len__ method returns
# the number of users, and the DataLoader calls the batch-of-user method that overrides
# the regular __getitem__ method
# WARNING: fishy non-DP adaptive clipping
privacy_engine = opacus.PrivacyEngine(
module=model,
batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier
if FLAGS.virtual_batch_multiplier > 0
else FLAGS.batch_size,
sample_size=len(train_data),
alphas=ALPHAS,
noise_multiplier=FLAGS.noise,
max_grad_norm=FLAGS.max_grad_norm,
experimental=bool(FLAGS.dynamic_clipping),
clipping_method=FLAGS.dynamic_clipping,
clip_per_layer=bool(FLAGS.per_layer_clipping),
)
privacy_engine.attach(optimizer)
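# Attaching the engine wraps optimizer.step() so that per-sample gradients are
# clipped to max_grad_norm and Gaussian noise is added before each update.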
# Do the actual training
t = time.time()
(
training_loss_epochs,
training_accuracy_epochs,
validation_loss_epochs,
validation_accuracy_epochs,
) = train_validate(
train_data, valid_data, model, optimizer, criterion, accuracy_fn, scheduler
)
training_time = time.time() - t
if FLAGS.dp:
epsilon_consumed, best_alpha = optimizer.privacy_engine.get_privacy_spent(
FLAGS.delta
)
epsilon_consumed = float(epsilon_consumed)
best_alpha = float(best_alpha)
logging.info(f"Best alpha: {best_alpha}")
rdp_epsilons_consumed = (
optimizer.privacy_engine.get_renyi_divergence()
* optimizer.privacy_engine.steps
).tolist()
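# Under RDP accounting with a fixed noise multiplier, the budget grows linearly
# with the number of steps: eps(alpha) = steps * renyi_divergence(alpha).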
logging.info(f"RDP budget consumed: {rdp_epsilons_consumed} for orders.")
# Identical to planned budget when we don't have early stopping
# rdp_epsilon_planned = compute_rdp_sgm(
# epochs=FLAGS.n_epochs,
# batch_size=FLAGS.batch_size * FLAGS.virtual_batch_multiplier
# if FLAGS.virtual_batch_multiplier > 0
# else FLAGS.batch_size,
# dataset_size=len(train_data),
# noise=FLAGS.noise,
# alphas=ALPHAS,
# )
# logging.info(f"Planned RDP budget: {rdp_epsilon_planned}")
else:
epsilon_consumed = None
rdp_epsilons_consumed = None
best_alpha = None
# Evaluate the model (non-DP evaluation here)
testing_size = len(test_data)
test_iterator = torch.utils.data.DataLoader(
test_data,
batch_size=FLAGS.batch_size,
shuffle=True,
num_workers=FLAGS.n_workers,
drop_last=False,
)
final_loss, final_accuracy = evaluate(model, test_iterator, criterion, accuracy_fn)
# Collect the metrics and the logs
logs = {
"training_time": training_time,
"total_time": time.time() - start_time,
"test_size": testing_size,
"n_trainable_parameters": count_parameters(model),
}
# Update the logs with the training data
if isinstance(train_data, UserTimeLevelDataset):
logs["train_size"] = train_data.get_n_events()
logs["n_train_users"] = len(train_data)
else:
logs["train_size"] = len(train_data)
logs.update(
flags_to_dict(dataset_args, model_args, training_args)
) # Dump the configuration flags
metrics = {
"accuracy": final_accuracy,
"training_loss_epochs": training_loss_epochs,
"training_accuracy_epochs": training_accuracy_epochs,
"validation_loss_epochs": validation_loss_epochs,
"validation_accuracy_epochs": validation_accuracy_epochs,
"loss": final_loss,
"epsilon": epsilon_consumed,
"target_epsilon": FLAGS.epsilon,
"alphas": ALPHAS,
"rdp_epsilons": rdp_epsilons_consumed,
"best_alpha": best_alpha,
# "dataset_files": os.listdir(FLAGS.dataset_dir),
}
# Save the outputs, or fall back to logging them
# Keeping metrics and logs separate is not useful for our experiments
if FLAGS.metrics_path != "":
save_yaml(FLAGS.metrics_path, metrics)
logging.info(f"Saved metrics: {FLAGS.metrics_path}")
else:
logging.info("Metrics not saved but concatenated to the logs.")
logs.update(metrics)
if FLAGS.log_path != "":
save_yaml(FLAGS.log_path, logs)
logging.info(f"Saved logs: {FLAGS.log_path}")
if FLAGS.model_path != "":
save_model(FLAGS.model_path, model)
logging.info(f"Saved model: {FLAGS.model_path}")
logging.info(logs)
logging.info(metrics)
if __name__ == "__main__":
app.run(main)
| block_path = os.path.join(bucket_path, block_name)
dest = os.path.join(block_dir, block_name)
os.system(f"gsutil cp {block_path} {dest}")
return |
debug_test.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TODO: This test could be implemented on all (most?) UNIXes if we
// added syscall.Tgkill more widely.
// We skip all of these tests under race mode because our test thread
// spends all of its time in the race runtime, which isn't a safe
// point.
//go:build amd64 && linux && !race
// +build amd64,linux,!race
package runtime_test
import (
"fmt"
"internal/abi"
"internal/goexperiment"
"math"
"os"
"regexp"
"runtime"
"runtime/debug"
"sync/atomic"
"syscall"
"testing"
)
func startDebugCallWorker(t *testing.T) (g *runtime.G, after func()) {
// This can deadlock if run under a debugger because it
// depends on catching SIGTRAP, which is usually swallowed by
// a debugger.
skipUnderDebugger(t)
// This can deadlock if there aren't enough threads or if a GC
// tries to interrupt an atomic loop (see issue #10958). We
// use 8 Ps so there's room for the debug call worker,
// something that's trying to preempt the call worker, and the
// goroutine that's trying to stop the call worker.
ogomaxprocs := runtime.GOMAXPROCS(8)
ogcpercent := debug.SetGCPercent(-1)
// ready is a buffered channel so debugCallWorker won't block
// on sending to it. This makes it less likely we'll catch
// debugCallWorker while it's in the runtime.
ready := make(chan *runtime.G, 1)
var stop uint32
done := make(chan error)
go debugCallWorker(ready, &stop, done)
g = <-ready
return g, func() {
atomic.StoreUint32(&stop, 1)
err := <-done
if err != nil {
t.Fatal(err)
}
runtime.GOMAXPROCS(ogomaxprocs)
debug.SetGCPercent(ogcpercent)
}
}
func debugCallWorker(ready chan<- *runtime.G, stop *uint32, done chan<- error) {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ready <- runtime.Getg()
x := 2
debugCallWorker2(stop, &x)
if x != 1 {
done <- fmt.Errorf("want x = 2, got %d; register pointer not adjusted?", x)
}
close(done)
}
// Don't inline this function, since we want to test adjusting
// pointers in the arguments.
//
//go:noinline
func debugCallWorker2(stop *uint32, x *int) {
for atomic.LoadUint32(stop) == 0 {
// Strongly encourage x to live in a register so we
// can test pointer register adjustment.
*x++
}
*x = 1
}
func debugCallTKill(tid int) error {
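// Tgkill (thread-group kill) delivers SIGTRAP to one specific thread (tid)
// rather than the whole process, so the trap lands on the target goroutine's thread.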
return syscall.Tgkill(syscall.Getpid(), tid, syscall.SIGTRAP)
}
// skipUnderDebugger skips the current test when running under a
// debugger (specifically if this process has a tracer). This is
// Linux-specific.
func skipUnderDebugger(t *testing.T) {
pid := syscall.Getpid()
status, err := os.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
if err != nil {
t.Logf("couldn't get proc tracer: %s", err)
return
}
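// In /proc/<pid>/status a traced process reports a nonzero tracer, e.g.
// "TracerPid:\t1234"; "TracerPid:\t0" means no debugger is attached.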
re := regexp.MustCompile(`TracerPid:\s+([0-9]+)`)
sub := re.FindSubmatch(status)
if sub == nil {
t.Logf("couldn't find proc tracer PID")
return
}
if string(sub[1]) == "0" {
return
}
t.Skip("test will deadlock under a debugger")
}
func TestDebugCall(t *testing.T) {
g, after := startDebugCallWorker(t)
defer after()
type stackArgs struct {
x0 int
x1 float64
y0Ret int
y1Ret float64
}
// Inject a call into the debugCallWorker goroutine and test
// basic argument and result passing.
fn := func(x int, y float64) (y0Ret int, y1Ret float64) {
return x + 1, y + 1.0
}
var args *stackArgs
var regs abi.RegArgs
intRegs := regs.Ints[:]
floatRegs := regs.Floats[:]
fval := float64(42.0)
if goexperiment.RegabiArgs {
intRegs[0] = 42
floatRegs[0] = math.Float64bits(fval)
} else {
args = &stackArgs{
x0: 42,
x1: 42.0,
}
}
if _, err := runtime.InjectDebugCall(g, fn, ®s, args, debugCallTKill, false); err != nil {
t.Fatal(err)
}
var result0 int
var result1 float64
if goexperiment.RegabiArgs {
result0 = int(intRegs[0])
result1 = math.Float64frombits(floatRegs[0])
} else {
result0 = args.y0Ret
result1 = args.y1Ret
}
if result0 != 43 {
t.Errorf("want 43, got %d", result0)
}
if result1 != fval+1 {
t.Errorf("want 43, got %f", result1)
}
}
func TestDebugCallLarge(t *testing.T) {
g, after := startDebugCallWorker(t)
defer after()
// Inject a call with a large call frame.
const N = 128
var args struct {
in [N]int
out [N]int
}
fn := func(in [N]int) (out [N]int) {
for i := range in {
out[i] = in[i] + 1
}
return
}
var want [N]int
for i := range args.in {
args.in[i] = i
want[i] = i + 1
}
if _, err := runtime.InjectDebugCall(g, fn, nil, &args, debugCallTKill, false); err != nil {
t.Fatal(err)
}
if want != args.out {
t.Fatalf("want %v, got %v", want, args.out)
}
}
func TestDebugCallGC(t *testing.T) {
g, after := startDebugCallWorker(t)
defer after()
// Inject a call that performs a GC.
if _, err := runtime.InjectDebugCall(g, runtime.GC, nil, nil, debugCallTKill, false); err != nil {
t.Fatal(err)
}
}
func TestDebugCallGrowStack(t *testing.T) {
g, after := startDebugCallWorker(t)
defer after()
// Inject a call that grows the stack. debugCallWorker checks
// for stack pointer breakage.
if _, err := runtime.InjectDebugCall(g, func() { growStack(nil) }, nil, nil, debugCallTKill, false); err != nil {
t.Fatal(err)
}
}
//go:nosplit
func debugCallUnsafePointWorker(gpp **runtime.G, ready, stop *uint32) {
// The nosplit causes this function to not contain safe-points
// except at calls.
runtime.LockOSThread()
defer runtime.UnlockOSThread()
*gpp = runtime.Getg()
for atomic.LoadUint32(stop) == 0 {
atomic.StoreUint32(ready, 1)
}
}
func TestDebugCallUnsafePoint(t *testing.T) |
func TestDebugCallPanic(t *testing.T) {
skipUnderDebugger(t)
// This can deadlock if there aren't enough threads.
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
ready := make(chan *runtime.G)
var stop uint32
defer atomic.StoreUint32(&stop, 1)
go func() {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
ready <- runtime.Getg()
for atomic.LoadUint32(&stop) == 0 {
}
}()
g := <-ready
p, err := runtime.InjectDebugCall(g, func() { panic("test") }, nil, nil, debugCallTKill, false)
if err != nil {
t.Fatal(err)
}
if ps, ok := p.(string); !ok || ps != "test" {
t.Fatalf("wanted panic %v, got %v", "test", p)
}
}
| {
skipUnderDebugger(t)
// This can deadlock if there aren't enough threads or if a GC
// tries to interrupt an atomic loop (see issue #10958).
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
defer debug.SetGCPercent(debug.SetGCPercent(-1))
// Test that the runtime refuses call injection at unsafe points.
var g *runtime.G
var ready, stop uint32
defer atomic.StoreUint32(&stop, 1)
go debugCallUnsafePointWorker(&g, &ready, &stop)
for atomic.LoadUint32(&ready) == 0 {
runtime.Gosched()
}
_, err := runtime.InjectDebugCall(g, func() {}, nil, nil, debugCallTKill, true)
if msg := "call not at safe point"; err == nil || err.Error() != msg {
t.Fatalf("want %q, got %s", msg, err)
}
} |
model_create_edge_application_request_dto.go | package model
import (
"github.com/huaweicloud/huaweicloud-sdk-go-v3/core/utils"
"errors"
"github.com/huaweicloud/huaweicloud-sdk-go-v3/core/converter"
"strings"
)
type CreateEdgeApplicationRequestDto struct {
// Application ID
EdgeAppId string `json:"edge_app_id"`
// Application description
Description *string `json:"description,omitempty"`
// Function type: one of data processing (DATA_PROCESSING), protocol parsing (PROTOCOL_PARSING), or IT integration (ON_PREMISE_INTEGRATION). Defaults to DATA_PROCESSING. Data-processing modules can transfer messages, PROTOCOL_PARSING is the driver type, and ON_PREMISE_INTEGRATION is used to deploy southbound 3rdIA.
FunctionType *CreateEdgeApplicationRequestDtoFunctionType `json:"function_type,omitempty"`
}
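// Illustrative sketch (not from the SDK docs): build a request body with an
// explicit function type. The enum value is taken by address because
// FunctionType is an optional pointer field.
//
//	ft := GetCreateEdgeApplicationRequestDtoFunctionTypeEnum().DATA_PROCESSING
//	dto := CreateEdgeApplicationRequestDto{
//		EdgeAppId:    "my-edge-app",
//		FunctionType: &ft,
//	}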
func (o CreateEdgeApplicationRequestDto) String() string {
data, err := utils.Marshal(o)
if err != nil {
return "CreateEdgeApplicationRequestDto struct{}"
}
return strings.Join([]string{"CreateEdgeApplicationRequestDto", string(data)}, " ")
}
type CreateEdgeApplicationRequestDtoFunctionType struct {
value string
}
type CreateEdgeApplicationRequestDtoFunctionTypeEnum struct {
DATA_PROCESSING CreateEdgeApplicationRequestDtoFunctionType
PROTOCOL_PARSING CreateEdgeApplicationRequestDtoFunctionType
ON_PREMISE_INTEGRATION CreateEdgeApplicationRequestDtoFunctionType
GATEWAY_MANAGER CreateEdgeApplicationRequestDtoFunctionType
}
func GetCreateEdgeApplicationRequestDtoFunctionTypeEnum() CreateEdgeApplicationRequestDtoFunctionTypeEnum {
return CreateEdgeApplicationRequestDtoFunctionTypeEnum{
DATA_PROCESSING: CreateEdgeApplicationRequestDtoFunctionType{
value: "DATA_PROCESSING", | },
ON_PREMISE_INTEGRATION: CreateEdgeApplicationRequestDtoFunctionType{
value: "ON_PREMISE_INTEGRATION",
},
GATEWAY_MANAGER: CreateEdgeApplicationRequestDtoFunctionType{
value: "GATEWAY_MANAGER",
},
}
}
func (c CreateEdgeApplicationRequestDtoFunctionType) MarshalJSON() ([]byte, error) {
return utils.Marshal(c.value)
}
func (c *CreateEdgeApplicationRequestDtoFunctionType) UnmarshalJSON(b []byte) error {
myConverter := converter.StringConverterFactory("string")
if myConverter != nil {
val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\""))
if err == nil {
c.value = val.(string)
return nil
}
return err
} else {
return errors.New("convert enum data to string error")
}
} | },
PROTOCOL_PARSING: CreateEdgeApplicationRequestDtoFunctionType{
value: "PROTOCOL_PARSING", |
term_test.go | // Copyright 2016 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package ast
import (
"encoding/json"
"fmt"
"reflect"
"strings"
"testing"
"github.com/open-policy-agent/opa/util"
)
func TestInterfaceToValue(t *testing.T) {
// Test util package unmarshalled inputs
input := `
{
"x": [
1,
true,
false,
null,
"hello",
["goodbye", 1],
{"y": 3.1}
]
}
`
var x interface{}
if err := util.UnmarshalJSON([]byte(input), &x); err != nil {
t.Fatal(err)
}
expected := MustParseTerm(input).Value
v, err := InterfaceToValue(x)
if err != nil {
t.Fatal(err)
}
if v.Compare(expected) != 0 {
t.Fatalf("Expected %v but got: %v", expected, v)
}
// Test standard JSON package unmarshalled inputs
if err := json.Unmarshal([]byte(input), &x); err != nil {
t.Fatal(err)
}
expected = MustParseTerm(input).Value
if v, err = InterfaceToValue(x); err != nil {
t.Fatal(err)
}
if expected.Compare(v) != 0 {
t.Fatalf("Expected %v but got: %v", expected, v)
}
// Test misc. types
tests := []struct {
input interface{}
expected string
}{
{int64(100), "100"},
{float64(100), "100"},
{int(100), "100"},
{map[string]string{"foo": "bar"}, `{"foo": "bar"}`},
{uint64(100), "100"},
}
for _, tc := range tests {
expected := MustParseTerm(tc.expected).Value
v, err := InterfaceToValue(tc.input)
if err != nil {
t.Fatal(err)
}
if v.Compare(expected) != 0 {
t.Fatalf("Expected %v but got: %v", expected, v)
}
}
}
func TestObjectInsertGetLen(t *testing.T) {
tests := []struct {
insert [][2]string
expected map[string]string
}{
{[][2]string{{`null`, `value1`}, {`null`, `value2`}}, map[string]string{`null`: `value2`}},
{[][2]string{{`false`, `value`}, {`true`, `value1`}, {`true`, `value2`}}, map[string]string{`false`: `value`, `true`: `value2`}},
{[][2]string{{`0`, `value`}, {`1`, `value1`}, {`1`, `value2`}, {`1.5`, `value`}}, map[string]string{`0`: `value`, `1`: `value2`, `1.5`: `value`}},
{[][2]string{{`"string"`, `value1`}, {`"string"`, `value2`}}, map[string]string{`"string"`: `value2`}},
{[][2]string{{`["other"]`, `value1`}, {`["other"]`, `value2`}}, map[string]string{`["other"]`: `value2`}},
}
for _, tc := range tests {
o := NewObject()
for _, kv := range tc.insert {
o.Insert(MustParseTerm(kv[0]), MustParseTerm(kv[1]))
if v := o.Get(MustParseTerm(kv[0])); v == nil || !MustParseTerm(kv[1]).Equal(v) {
t.Errorf("Expected the object to contain %v", v)
}
}
if o.Len() != len(tc.expected) {
t.Errorf("Expected the object to have %v entries", len(tc.expected))
}
for k, v := range tc.expected {
if x := o.Get(MustParseTerm(k)); x == nil || !MustParseTerm(v).Equal(x) {
t.Errorf("Expected the object to contain %v", k)
}
}
}
}
func TestObjectSetOperations(t *testing.T) {
a := MustParseTerm(`{"a": "b", "c": "d"}`).Value.(Object)
b := MustParseTerm(`{"c": "q", "d": "e"}`).Value.(Object)
r1 := a.Diff(b)
if r1.Compare(MustParseTerm(`{"a": "b"}`).Value) != 0 {
t.Errorf(`Expected a.Diff(b) to equal {"a": "b"} but got: %v`, r1)
}
r2 := a.Intersect(b)
var expectedTerms []*Term
MustParseTerm(`["c", "d", "q"]`).Value.(*Array).Foreach(func(t *Term) {
expectedTerms = append(expectedTerms, t)
})
if len(r2) != 1 || !termSliceEqual(r2[0][:], expectedTerms) {
t.Errorf(`Expected a.Intersect(b) to equal [["c", "d", "q"]] but got: %v`, r2)
}
if r3, ok := a.Merge(b); ok {
t.Errorf("Expected a.Merge(b) to fail but got: %v", r3)
}
c := MustParseTerm(`{"a": {"b": [1], "c": {"d": 2}}}`).Value.(Object)
d := MustParseTerm(`{"a": {"x": [3], "c": {"y": 4}}}`).Value.(Object)
r3, ok := c.Merge(d)
expected := MustParseTerm(`{"a": {"b": [1], "x": [3], "c": {"d": 2, "y": 4}}}`).Value.(Object)
if !ok || r3.Compare(expected) != 0 {
t.Errorf("Expected c.Merge(d) to equal %v but got: %v", expected, r3)
}
}
func TestObjectFilter(t *testing.T) {
cases := []struct {
note string
object string
filter string
expected string
}{
{
note: "base",
object: `{"a": {"b": {"c": 7, "d": 8}}, "e": 9}`,
filter: `{"a": {"b": {"c": null}}}`,
expected: `{"a": {"b": {"c": 7}}}`,
},
{
note: "multiple roots",
object: `{"a": {"b": {"c": 7, "d": 8}}, "e": 9}`,
filter: `{"a": {"b": {"c": null}}, "e": null}`,
expected: `{"a": {"b": {"c": 7}}, "e": 9}`,
},
{
note: "shared roots",
object: `{"a": {"b": {"c": 7, "d": 8}, "e": 9}}`,
filter: `{"a": {"b": {"c": null}, "e": null}}`,
expected: `{"a": {"b": {"c": 7}, "e": 9}}`,
},
{
note: "empty filter",
object: `{"a": 7}`,
filter: `{}`,
expected: `{}`,
},
{
note: "empty object",
object: `{}`,
filter: `{"a": {"b": null}}`,
expected: `{}`,
},
{
note: "arrays",
object: `{"a": [{"b": 7, "c": 8}, {"d": 9}]}`,
filter: `{"a": {"0": {"b": null}, "1": null}}`,
expected: `{"a": [{"b": 7}, {"d": 9}]}`,
},
{
note: "object with number keys",
object: `{"a": [{"1":["b", "c", "d"]}, {"x": "y"}]}`,
filter: `{"a": {"0": {"1": {"2": null}}}}`,
expected: `{"a": [{"1": ["d"]}]}`,
},
{
note: "sets",
object: `{"a": {"b", "c", "d"}, "x": {"y"}}`,
filter: `{"a": {"b": null, "d": null}, "x": null}`,
expected: `{"a": {"b", "d"}, "x": {"y"}}`,
},
}
for _, tc := range cases {
t.Run(tc.note, func(t *testing.T) {
obj := MustParseTerm(tc.object).Value.(Object)
filterObj := MustParseTerm(tc.filter).Value.(Object)
expected := MustParseTerm(tc.expected).Value.(Object)
actual, err := obj.Filter(filterObj)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if actual.Compare(expected) != 0 {
t.Errorf("Expected:\n\n\t%s\n\nGot:\n\n\t%s\n\n", expected, actual)
}
})
}
}
func TestTermBadJSON(t *testing.T) {
input := `{
"Value": [[
{"Value": [{"Value": "a", "Type": "var"}, {"Value": "x", "Type": "string"}], "Type": "ref"},
{"Value": [{"Value": "x", "Type": "var"}], "Type": "array"}
], [
{"Value": 100, "Type": "array"},
{"Value": "foo", "Type": "string"}
]],
"Type": "object"
}`
term := Term{}
err := util.UnmarshalJSON([]byte(input), &term)
expected := fmt.Errorf("ast: unable to unmarshal term")
if !reflect.DeepEqual(expected, err) {
t.Errorf("Expected %v but got: %v", expected, err)
}
}
func TestTermEqual(t *testing.T) {
assertTermEqual(t, NullTerm(), NullTerm())
assertTermEqual(t, BooleanTerm(true), BooleanTerm(true))
assertTermEqual(t, IntNumberTerm(5), IntNumberTerm(5))
assertTermEqual(t, NumberTerm(json.Number("1e6")), NumberTerm("1000000"))
assertTermEqual(t, StringTerm("a string"), StringTerm("a string"))
assertTermEqual(t, ObjectTerm(), ObjectTerm())
assertTermEqual(t, ArrayTerm(), ArrayTerm())
assertTermEqual(t, ObjectTerm(Item(IntNumberTerm(1), IntNumberTerm(2))), ObjectTerm(Item(IntNumberTerm(1), IntNumberTerm(2))))
assertTermEqual(t, ObjectTerm(Item(IntNumberTerm(1), IntNumberTerm(2)), Item(IntNumberTerm(3), IntNumberTerm(4))), ObjectTerm(Item(IntNumberTerm(1), IntNumberTerm(2)), Item(IntNumberTerm(3), IntNumberTerm(4))))
assertTermEqual(t, ArrayTerm(IntNumberTerm(1), IntNumberTerm(2), IntNumberTerm(3)), ArrayTerm(IntNumberTerm(1), IntNumberTerm(2), IntNumberTerm(3)))
assertTermEqual(t, VarTerm("foo"), VarTerm("foo"))
assertTermEqual(t, RefTerm(VarTerm("foo"), VarTerm("i"), IntNumberTerm(2)), RefTerm(VarTerm("foo"), VarTerm("i"), IntNumberTerm(2)))
assertTermEqual(t, ArrayComprehensionTerm(VarTerm("x"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})), ArrayComprehensionTerm(VarTerm("x"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})))
assertTermEqual(t, ObjectComprehensionTerm(VarTerm("x"), VarTerm("y"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})), ObjectComprehensionTerm(VarTerm("x"), VarTerm("y"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})))
assertTermEqual(t, SetComprehensionTerm(VarTerm("x"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})), SetComprehensionTerm(VarTerm("x"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})))
assertTermNotEqual(t, NullTerm(), BooleanTerm(true))
assertTermNotEqual(t, BooleanTerm(true), BooleanTerm(false))
assertTermNotEqual(t, IntNumberTerm(5), IntNumberTerm(7))
assertTermNotEqual(t, StringTerm("a string"), StringTerm("abc"))
assertTermNotEqual(t, ObjectTerm(Item(IntNumberTerm(3), IntNumberTerm(2))), ObjectTerm(Item(IntNumberTerm(1), IntNumberTerm(2))))
assertTermNotEqual(t, ObjectTerm(Item(IntNumberTerm(1), IntNumberTerm(2)), Item(IntNumberTerm(3), IntNumberTerm(7))), ObjectTerm(Item(IntNumberTerm(1), IntNumberTerm(2)), Item(IntNumberTerm(3), IntNumberTerm(4))))
assertTermNotEqual(t, IntNumberTerm(5), StringTerm("a string"))
assertTermNotEqual(t, IntNumberTerm(1), BooleanTerm(true))
assertTermNotEqual(t, ObjectTerm(Item(IntNumberTerm(1), IntNumberTerm(2)), Item(IntNumberTerm(3), IntNumberTerm(7))), ArrayTerm(IntNumberTerm(1), IntNumberTerm(2), IntNumberTerm(7)))
assertTermNotEqual(t, ArrayTerm(IntNumberTerm(1), IntNumberTerm(2), IntNumberTerm(3)), ArrayTerm(IntNumberTerm(1), IntNumberTerm(2), IntNumberTerm(4)))
assertTermNotEqual(t, VarTerm("foo"), VarTerm("bar"))
assertTermNotEqual(t, RefTerm(VarTerm("foo"), VarTerm("i"), IntNumberTerm(2)), RefTerm(VarTerm("foo"), StringTerm("i"), IntNumberTerm(2)))
assertTermNotEqual(t, ArrayComprehensionTerm(VarTerm("x"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("j"))})), ArrayComprehensionTerm(VarTerm("x"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})))
assertTermNotEqual(t, ObjectComprehensionTerm(VarTerm("x"), VarTerm("y"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("j"))})), ObjectComprehensionTerm(VarTerm("x"), VarTerm("y"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})))
assertTermNotEqual(t, SetComprehensionTerm(VarTerm("x"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("j"))})), SetComprehensionTerm(VarTerm("x"), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})))
}
func TestFind(t *testing.T) {
term := MustParseTerm(`{"foo": [1,{"bar": {2,3,4}}], "baz": {"qux": ["hello", "world"]}}`)
tests := []struct {
path *Term
expected interface{}
}{
{RefTerm(StringTerm("foo"), IntNumberTerm(1), StringTerm("bar")), MustParseTerm(`{2, 3, 4}`)},
{RefTerm(StringTerm("foo"), IntNumberTerm(1), StringTerm("bar"), IntNumberTerm(4)), MustParseTerm(`4`)},
{RefTerm(StringTerm("foo"), IntNumberTerm(2)), fmt.Errorf("not found")},
{RefTerm(StringTerm("baz"), StringTerm("qux"), IntNumberTerm(0)), MustParseTerm(`"hello"`)},
}
for _, tc := range tests {
result, err := term.Value.Find(tc.path.Value.(Ref))
switch expected := tc.expected.(type) {
case *Term:
if err != nil {
t.Fatalf("Unexpected error occurred for %v: %v", tc.path, err)
}
if result.Compare(expected.Value) != 0 {
t.Fatalf("Expected value %v for %v but got: %v", expected, tc.path, result)
}
case error:
if err == nil {
t.Fatalf("Expected error but got: %v", result)
}
if !strings.Contains(err.Error(), expected.Error()) {
t.Fatalf("Expected error to contain %v but got: %v", expected, err)
}
default:
panic("bad expected type")
}
}
}
func TestHash(t *testing.T) {
doc := `{"a": [[true, {"b": [null]}, {"c": "d"}]], "e": {100: a[i].b}, "k": ["foo" | true], "o": {"foo": "bar" | true}, "sc": {"foo" | true}, "s": {1, 2, {3, 4}}, "big": 1e+1000}`
stmt1 := MustParseStatement(doc)
stmt2 := MustParseStatement(doc)
obj1 := stmt1.(Body)[0].Terms.(*Term).Value.(Object)
obj2 := stmt2.(Body)[0].Terms.(*Term).Value.(Object)
if obj1.Hash() != obj2.Hash() {
t.Errorf("Expected hash codes to be equal")
}
}
func TestTermIsGround(t *testing.T) {
tests := []struct {
note string
term string
expected bool
}{
{"null", "null", true},
{"string", `"foo"`, true},
{"number", "42.1", true},
{"boolean", "false", true},
{"var", "x", false},
{"ref ground", "a.b[0]", true},
{"ref non-ground", "a.b[i].x", false},
{"array ground", "[1,2,3]", true},
{"array non-ground", "[1,2,x]", false},
{"set ground", "{1,2,3}", true},
{"Set non-ground", "{1,2,x}", false},
{"object ground", `{"a": 1}`, true},
{"object non-ground key", `{"x": 1, y: 2}`, false},
{"object non-ground value", `{"x": 1, "y": y}`, false},
{"array compr ground", `["a" | true]`, true},
{"array compr non-ground", `[x | x = a[i]]`, false},
}
for i, tc := range tests {
term := MustParseTerm(tc.term)
if term.IsGround() != tc.expected {
expected := "ground"
if !tc.expected {
expected = "non-ground"
}
t.Errorf("Expected term %v to be %s (test case %d: %v)", term, expected, i, tc.note)
}
}
}
func TestObjectRemainsGround(t *testing.T) {
tests := []struct {
key string
value string
ground bool
}{
{`"a"`, `"value1"`, true},
{`"b"`, `"value2"`, true},
{`"a"`, `x`, false},
{`"a"`, `"value1"`, true},
{`"b"`, `y`, false},
{`"c"`, `value3`, false},
}
obj := NewObject()
for i, tc := range tests {
obj.Insert(MustParseTerm(tc.key), MustParseTerm(tc.value))
if obj.IsGround() != tc.ground {
t.Errorf("Unexpected object is ground (test case %d)", i)
}
}
}
func TestIsConstant(t *testing.T) {
tests := []struct {
term string
expected bool
}{
{`[{"foo": {true, false, [1, 2]}}]`, true},
{`[{"foo": {x}}]`, false},
}
for _, tc := range tests {
term := MustParseTerm(tc.term)
if IsConstant(term.Value) != tc.expected {
t.Fatalf("Expected IsConstant(%v) = %v", term, tc.expected)
}
}
}
func TestIsScalar(t *testing.T) {
tests := []struct {
term string
expected bool
}{
{"null", true},
{`"string"`, true},
{"3.14", true},
{"false", true},
{"[1,2,3]", false},
{"{1,2,3}", false},
{`{"a": 1}`, false},
{`[x | x = 0]`, false},
}
for _, tc := range tests {
term := MustParseTerm(tc.term)
if IsScalar(term.Value) != tc.expected {
t.Errorf("Expected IsScalar(%v) = %v", term, tc.expected)
}
}
}
func TestTermString(t *testing.T) {
assertToString(t, Null{}, "null")
assertToString(t, Boolean(true), "true")
assertToString(t, Boolean(false), "false")
assertToString(t, Number("4"), "4")
assertToString(t, Number("42.1"), "42.1")
assertToString(t, Number("6e7"), "6e7")
assertToString(t, UIntNumberTerm(uint64(1)).Value, "1")
assertToString(t, String("foo"), "\"foo\"")
assertToString(t, String("\"foo\""), "\"\\\"foo\\\"\"")
assertToString(t, String("foo bar"), "\"foo bar\"")
assertToString(t, Var("foo"), "foo")
assertToString(t, RefTerm(VarTerm("foo"), StringTerm("bar")).Value, "foo.bar")
assertToString(t, RefTerm(VarTerm("foo"), StringTerm("bar"), VarTerm("i"), IntNumberTerm(0), StringTerm("baz")).Value, "foo.bar[i][0].baz")
assertToString(t, RefTerm(VarTerm("foo"), BooleanTerm(false), NullTerm(), StringTerm("bar")).Value, "foo[false][null].bar")
assertToString(t, RefTerm(VarTerm("p"), StringTerm("not")).Value, `p["not"]`)
assertToString(t, RefTerm(CallTerm(VarTerm("f"), VarTerm("x")), IntNumberTerm(0)).Value, "f(x)[0]")
assertToString(t, RefTerm(ArrayTerm(StringTerm("a"), StringTerm("b")), IntNumberTerm(0)).Value, "[\"a\", \"b\"][0]")
assertToString(t, ArrayTerm().Value, "[]")
assertToString(t, ObjectTerm().Value, "{}")
assertToString(t, SetTerm().Value, "set()")
assertToString(t, ArrayTerm(ObjectTerm(Item(VarTerm("foo"), ArrayTerm(RefTerm(VarTerm("bar"), VarTerm("i"))))), StringTerm("foo"), SetTerm(BooleanTerm(true), NullTerm()), FloatNumberTerm(42.1)).Value, "[{foo: [bar[i]]}, \"foo\", {null, true}, 42.1]")
assertToString(t, ArrayComprehensionTerm(ArrayTerm(VarTerm("x")), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})).Value, `[[x] | a[i]]`)
assertToString(t, ObjectComprehensionTerm(VarTerm("y"), ArrayTerm(VarTerm("x")), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})).Value, `{y: [x] | a[i]}`)
assertToString(t, SetComprehensionTerm(ArrayTerm(VarTerm("x")), NewBody(&Expr{Terms: RefTerm(VarTerm("a"), VarTerm("i"))})).Value, `{[x] | a[i]}`)
// ensure that objects and sets have deterministic String() results
assertToString(t, SetTerm(VarTerm("y"), VarTerm("x")).Value, "{x, y}")
assertToString(t, ObjectTerm([2]*Term{VarTerm("y"), VarTerm("b")}, [2]*Term{VarTerm("x"), VarTerm("a")}).Value, "{x: a, y: b}")
}
func TestRefHasPrefix(t *testing.T) {
a := MustParseRef("foo.bar.baz")
b := MustParseRef("foo.bar")
c := MustParseRef("foo.bar[0][x]")
if !a.HasPrefix(b) {
t.Error("Expected a.HasPrefix(b)")
}
if b.HasPrefix(a) {
t.Error("Expected !b.HasPrefix(a)")
}
if !c.HasPrefix(b) {
t.Error("Expected c.HasPrefix(b)")
}
}
func TestRefAppend(t *testing.T) {
a := MustParseRef("foo.bar.baz")
b := a.Append(VarTerm("x"))
if !b.Equal(MustParseRef("foo.bar.baz[x]")) {
t.Error("Expected foo.bar.baz[x]")
}
}
func TestRefInsert(t *testing.T) {
ref := MustParseRef("test.ex")
cases := []struct {
pos int
term *Term
expected string
}{
{0, VarTerm("foo"), `foo[test].ex`},
{1, StringTerm("foo"), `test.foo.ex`},
{2, StringTerm("foo"), `test.ex.foo`},
}
for i := range cases {
result := ref.Insert(cases[i].term, cases[i].pos)
expected := MustParseRef(cases[i].expected)
if !expected.Equal(result) {
t.Fatalf("Expected %v (len: %d) but got: %v (len: %d)", expected, len(expected), result, len(result))
}
}
}
func TestRefDynamic(t *testing.T) {
a := MustParseRef("foo.bar[baz.qux].corge")
if a.Dynamic() != 2 {
t.Fatalf("Expected dynamic offset to be baz.qux for foo.bar[baz.qux].corge")
}
if a[:a.Dynamic()].Dynamic() != -1 {
t.Fatalf("Expected dynamic offset to be -1 for foo.bar")
}
if MustParseRef("f(x)[0]").Dynamic() != 0 {
t.Fatalf("Expected dynamic offset to be f(x) for foo.bar[baz.qux].corge")
}
}
func TestRefExtend(t *testing.T) {
a := MustParseRef("foo.bar.baz")
b := MustParseRef("qux.corge")
c := MustParseRef("data")
result := a.Extend(b)
expected := MustParseRef("foo.bar.baz.qux.corge")
if !result.Equal(expected) {
t.Fatalf("Expected %v but got %v", expected, result)
}
result = result.Extend(c)
expected = MustParseRef("foo.bar.baz.qux.corge.data")
if !result.Equal(expected) {
t.Fatalf("Expected %v but got %v", expected, result)
}
}
func TestRefConcat(t *testing.T) {
a := MustParseRef("foo.bar.baz")
terms := []*Term{}
if !a.Concat(terms).Equal(a) {
t.Fatal("Expected no change")
}
terms = append(terms, StringTerm("qux"))
exp := MustParseTerm("foo.bar.baz.qux")
result := a.Concat(terms)
if !result.Equal(exp.Value) {
t.Fatalf("Expected %v but got %v", exp, result)
}
exp = MustParseTerm("foo.bar.baz.qux[0]")
terms = append(terms, IntNumberTerm(0))
result = a.Concat(terms)
if !result.Equal(exp.Value) {
t.Fatalf("Expected %v but got %v", exp, result)
}
exp = MustParseTerm("foo.bar.baz")
if !a.Equal(exp.Value) {
t.Fatalf("Expected %v but got %v (want a to be unchanged)", exp, a)
}
}
func TestRefPtr(t *testing.T) {
cases := []string{
"",
"a",
"a/b",
"/a/b",
"/a/b/",
"a%2Fb",
}
for _, tc := range cases {
ref, err := PtrRef(DefaultRootDocument.Copy(), tc)
if err != nil {
t.Fatal("Unexpected error:", err)
}
ptr, err := ref.Ptr()
if err != nil {
t.Fatal("Unexpected error:", err)
}
roundtrip, err := PtrRef(DefaultRootDocument.Copy(), ptr)
if err != nil {
t.Fatal("Unexpected error:", err)
}
if !ref.Equal(roundtrip) {
t.Fatalf("Expected roundtrip of %q to be equal but got %v and %v", tc, ref, roundtrip)
}
}
if _, err := PtrRef(DefaultRootDocument.Copy(), "2%"); err == nil {
t.Fatalf("Expected error from %q", "2%")
}
ref := Ref{VarTerm("x"), IntNumberTerm(1)}
if _, err := ref.Ptr(); err == nil {
t.Fatal("Expected error from x[1]")
}
}
func TestSetEqual(t *testing.T) {
tests := []struct {
a string
b string
expected bool
}{
{"set()", "set()", true},
{"{1,{2,3},4}", "{1,{2,3},4}", true},
{"{1,{2,3},4}", "{4,{3,2},1}", true},
{"{1,2,{3,4}}", "{1,2,{3,4},1,2,{3,4}}", true},
{"{1,2,3,4}", "{1,2,3}", false},
{"{1,2,3}", "{1,2,3,4}", false},
}
for _, tc := range tests {
a := MustParseTerm(tc.a)
b := MustParseTerm(tc.b)
if a.Equal(b) != tc.expected {
var msg string
if tc.expected {
msg = fmt.Sprintf("Expected %v to equal %v", a, b)
} else {
msg = fmt.Sprintf("Expected %v to NOT equal %v", a, b)
}
t.Error(msg)
}
}
}
func TestSetMap(t *testing.T) {
set := MustParseTerm(`{"foo", "bar", "baz", "qux"}`).Value.(Set)
result, err := set.Map(func(term *Term) (*Term, error) {
s := string(term.Value.(String))
if strings.Contains(s, "a") {
return &Term{Value: String(strings.ToUpper(s))}, nil
}
return term, nil
})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
expected := MustParseTerm(`{"foo", "BAR", "BAZ", "qux"}`).Value
if result.Compare(expected) != 0 {
t.Fatalf("Expected map result to be %v but got: %v", expected, result)
}
result, err = set.Map(func(*Term) (*Term, error) {
return nil, fmt.Errorf("oops")
})
if !reflect.DeepEqual(err, fmt.Errorf("oops")) {
t.Fatalf("Expected oops to be returned but got: %v, %v", result, err)
}
}
func TestSetAddContainsLen(t *testing.T) {
tests := []struct {
add []string
expected []string
}{
{[]string{`null`, `null`}, []string{`null`}},
{[]string{`true`, `true`, `false`}, []string{`true`, `false`}},
{[]string{`0`, `1`, `1`, `1.5`}, []string{`0`, `1`, `1.5`}},
{[]string{`"string"`, `"string"`}, []string{`"string"`}},
{[]string{`["other"]`, `["other"]`}, []string{`["other"]`}},
}
for _, tc := range tests {
s := NewSet()
for _, v := range tc.add {
x := MustParseTerm(v)
s.Add(x)
if !s.Contains(x) {
t.Errorf("Expected the set to contain %v", v)
}
}
if s.Len() != len(tc.expected) {
t.Errorf("Expected the set to have %v entries", len(tc.expected))
}
for _, v := range tc.expected {
if !s.Contains(MustParseTerm(v)) |
}
}
}
func TestSetOperations(t *testing.T) {
tests := []struct {
a string
b string
c string
op string
}{
{`{1,2,3,4}`, `{1,3,5}`, `{2,4}`, "-"},
{`{1,3,5}`, `{1,2,3,4}`, `{5,}`, "-"},
{`{1,2,3,4}`, `{1,3,5}`, `{1,3}`, "&"},
{`{1,3,5}`, `{1,2,3,4}`, `{1,3}`, "&"},
{`{1,2,3,4}`, `{1,3,5}`, `{1,2,3,4,5}`, "|"},
{`{1,3,5}`, `{1,2,3,4}`, `{1,2,3,4,5}`, "|"},
}
for _, tc := range tests {
s1 := MustParseTerm(tc.a).Value.(Set)
s2 := MustParseTerm(tc.b).Value.(Set)
s3 := MustParseTerm(tc.c).Value.(Set)
var result Set
if tc.op == "-" {
result = s1.Diff(s2)
} else if tc.op == "&" {
result = s1.Intersect(s2)
} else if tc.op == "|" {
result = s1.Union(s2)
} else {
panic("bad operation")
}
if result.Compare(s3) != 0 {
t.Errorf("Expected %v for %v %v %v but got: %v", s3, tc.a, tc.op, tc.b, result)
}
}
}
func TestSetCopy(t *testing.T) {
orig := MustParseTerm("{1,2,3}")
cpy := orig.Copy()
vis := NewGenericVisitor(func(x interface{}) bool {
if Compare(IntNumberTerm(2), x) == 0 {
x.(*Term).Value = String("modified")
}
return false
})
vis.Walk(orig)
expOrig := MustParseTerm(`{1, "modified", 3}`)
expCpy := MustParseTerm(`{1,2,3}`)
if !expOrig.Equal(orig) {
t.Errorf("Expected %v but got %v", expOrig, orig)
}
if !expCpy.Equal(cpy) {
t.Errorf("Expected %v but got %v", expCpy, cpy)
}
}
func TestArrayOperations(t *testing.T) {
arr := MustParseTerm(`[1,2,3,4]`).Value.(*Array)
getTests := []struct {
input string
expected string
}{
{"x", ""},
{"4.1", ""},
{"-1", ""},
{"4", ""},
{"0", "1"},
{"3", "4"},
}
for _, tc := range getTests {
input := MustParseTerm(tc.input)
result := arr.Get(input)
if result != nil {
if tc.expected != "" {
expected := MustParseTerm(tc.expected)
if expected.Equal(result) {
continue
}
}
} else if tc.expected == "" {
continue
}
t.Errorf("Expected %v.get(%v) => %v but got: %v", arr, input, tc.expected, result)
}
// Iteration, append and slice tests
var results []*Term
tests := []struct {
note string
input string
expected []string
iterator func(arr *Array)
}{
{
"for",
`[1, 2, 3, 4]`,
[]string{"1", "2", "3", "4"},
func(arr *Array) {
for i := 0; i < arr.Len(); i++ {
results = append(results, arr.Elem(i))
}
},
},
{
"foreach",
"[1, 2, 3, 4]",
[]string{"1", "2", "3", "4"},
func(arr *Array) {
arr.Foreach(func(v *Term) {
results = append(results, v)
})
},
},
{
"until",
"[1, 2, 3, 4]",
[]string{"1"},
func(arr *Array) {
arr.Until(func(v *Term) bool {
results = append(results, v)
return len(results) == 1
})
},
},
{
"append",
"[1, 2]",
[]string{"1", "2", "3"},
func(arr *Array) {
arr.Append(MustParseTerm("3")).Foreach(func(v *Term) {
results = append(results, v)
})
},
},
{
"slice",
"[1, 2, 3, 4]",
[]string{"3", "4"},
func(arr *Array) {
arr.Slice(2, 4).Foreach(func(v *Term) {
results = append(results, v)
})
},
},
{
"slice",
"[1, 2, 3, 4]",
[]string{"3", "4"},
func(arr *Array) {
arr.Slice(2, -1).Foreach(func(v *Term) {
results = append(results, v)
})
},
},
}
for _, tc := range tests {
t.Run(tc.note, func(t *testing.T) {
arr := MustParseTerm(tc.input).Value.(*Array)
var expected []*Term
for _, e := range tc.expected {
expected = append(expected, MustParseTerm(e))
}
results = nil
tc.iterator(arr)
if !termSliceEqual(results, expected) {
t.Errorf("Expected iteration to return %v but got %v", expected, results)
}
})
}
}
func TestValueToInterface(t *testing.T) {
// Happy path
term := MustParseTerm(`{
"foo": [1, "two", true, null, {3,
}]
}`)
value, err := JSON(term.Value)
if err != nil {
t.Fatalf("Unexpected error while converting term %v to JSON: %v", term, err)
}
var expected interface{}
if err := util.UnmarshalJSON([]byte(`{"foo": [1, "two", true, null, [3]]}`), &expected); err != nil {
panic(err)
}
if util.Compare(value, expected) != 0 {
t.Fatalf("Expected %v but got: %v", expected, value)
}
// Nested ref value
term = MustParseTerm(`{
"foo": [{data.a.b.c,}]
}`)
_, err = JSON(term.Value)
if err == nil {
t.Fatalf("Expected error from JSON(%v)", term)
}
// Ref key
term = MustParseTerm(`{
data.foo.a: 1
}`)
_, err = JSON(term.Value)
if err == nil {
t.Fatalf("Expected error from JSON(%v)", term)
}
// Requires evaluation
term = MustParseTerm(`{
"foo": [x | x = 1]
}`)
_, err = JSON(term.Value)
if err == nil {
t.Fatalf("Expected error from JSON(%v)", term)
}
// Ordering option
//
// These inputs exercise all of the cases (i.e., sets nested in arrays, object keys, and object values.)
//
a, err := JSONWithOpt(MustParseTerm(`[{{3, 4}: {1, 2}}]`).Value, JSONOpt{SortSets: true})
if err != nil {
t.Fatal(err)
}
b, err := JSONWithOpt(MustParseTerm(`[{{4, 3}: {2, 1}}]`).Value, JSONOpt{SortSets: true})
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(a, b) {
t.Fatalf("expcted %v = %v", a, b)
}
}
func assertTermEqual(t *testing.T, x *Term, y *Term) {
if !x.Equal(y) {
t.Errorf("Failure on equality: \n%s and \n%s\n", x, y)
}
}
func assertTermNotEqual(t *testing.T, x *Term, y *Term) {
if x.Equal(y) {
t.Errorf("Failure on non-equality: \n%s and \n%s\n", x, y)
}
}
func assertToString(t *testing.T, val Value, expected string) {
result := val.String()
if result != expected {
t.Errorf("Expected %v but got %v", expected, result)
}
}
| {
t.Errorf("Expected the set to contain %v", v)
} |
XbrlSemanticSqlDB.py | '''
XbrlSemanticSqlDB.py implements an SQL database interface for Arelle, based
on a concrete realization of the Abstract Model PWD 2.0 layer. This is a semantic
representation of XBRL information.
This module may save directly to a Postgres, MySQL, SQLite, MSSQL, or Oracle server.
This module provides the execution context for saving a dts and instances in
XBRL SQL database. It may be loaded by Arelle's RSS feed, or by individual
DTS and instances opened by interactive or command line/web service mode.
Example dialog or command line parameters for operation:
host: the supporting host for SQL Server
port: the host port of server
user, password: if needed for server
database: the top level path segment for the SQL Server
timeout:
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
to use from command line:
linux
# be sure plugin is installed
arelleCmdLine --plugin '+xbrlDB|show'
arelleCmdLine -f http://sec.org/somewhere/some.rss -v --store-to-XBRL-DB 'myserver.com,portnumber,pguser,pgpasswd,database,timeoutseconds'
windows
rem be sure plugin is installed
arelleCmdLine --plugin "xbrlDB"
arelleCmdLine -f http://sec.org/somewhere/some.rss -v --store-to-XBRL-DB "myserver.com,portnumber,pguser,pgpasswd,database,timeoutseconds"
'''
import os, time, datetime, logging
from arelle.ModelDocument import Type
from arelle.ModelDtsObject import ModelConcept, ModelType, ModelResource, ModelRelationship
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelXbrl import ModelXbrl
from arelle.ModelDocument import ModelDocument
from arelle.ModelObject import ModelObject
from arelle.ModelValue import qname
from arelle.ValidateXbrlCalcs import roundValue
from arelle.XmlValidate import UNVALIDATED, VALID
from arelle.XmlUtil import elementChildSequence
from arelle import XbrlConst
from arelle.UrlUtil import authority, ensureUrl
from .SqlDb import XPDBException, isSqlConnection, SqlDbConnection
from .tableFacts import tableFacts
from .entityInformation import loadEntityInformation
from .primaryDocumentFacts import loadPrimaryDocumentFacts
from collections import defaultdict
def insertIntoDB(modelXbrl,
user=None, password=None, host=None, port=None, database=None, timeout=None,
product=None, entrypoint=None, rssItem=None, **kwargs):
xbrlDbConn = None
try:
xbrlDbConn = XbrlSqlDatabaseConnection(modelXbrl, user, password, host, port, database, timeout, product)
if "rssObject" in kwargs: # initialize batch
xbrlDbConn.initializeBatch(kwargs["rssObject"])
else:
xbrlDbConn.verifyTables()
xbrlDbConn.insertXbrl(entrypoint, rssItem)
xbrlDbConn.close()
except Exception as ex:
if xbrlDbConn is not None:
try:
xbrlDbConn.close(rollback=True)
except Exception as ex2:
pass
raise # reraise original exception with original traceback
def isDBPort(host, port, timeout=10, product="postgres"):
return isSqlConnection(host, port, timeout)
XBRLDBTABLES = {
"filing", "report",
"document", "referenced_documents",
"aspect", "data_type", "role_type", "arcrole_type",
"resource", "relationship_set", "root", "relationship",
"data_point", "entity", "period", "unit", "unit_measure", "aspect_value_selection",
"message", "message_reference",
"industry", "industry_level", "industry_structure",
}
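# Set of tables expected by the semantic schema; verifyTables() below compares
# this against the live database to detect a missing or partially built schema.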
class XbrlSqlDatabaseConnection(SqlDbConnection):
def verifyTables(self):
missingTables = XBRLDBTABLES - self.tablesInDB()
# if no tables, initialize database
if missingTables == XBRLDBTABLES:
self.create(os.path.join("sql", "semantic", {"mssql": "xbrlSemanticMSSqlDB.sql",
"mysql": "xbrlSemanticMySqlDB.ddl",
"sqlite": "xbrlSemanticSQLiteDB.ddl",
"orcl": "xbrlSemanticOracleDB.sql",
"postgres": "xbrlSemanticPostgresDB.ddl"}[self.product]))
missingTables = XBRLDBTABLES - self.tablesInDB()
if missingTables and missingTables != {"sequences"}:
raise XPDBException("sqlDB:MissingTables",
_("The following tables are missing: %(missingTableNames)s"),
missingTableNames=', '.join(t for t in sorted(missingTables)))
def insertXbrl(self, entrypoint, rssItem):
try:
# must also have default dimensions loaded
from arelle import ValidateXbrlDimensions
ValidateXbrlDimensions.loadDimensionDefaults(self.modelXbrl)
# get logging entries (needed to find which aspects to identify)
self.loggingEntries = []
for handler in logging.getLogger("arelle").handlers:
if hasattr(handler, "dbHandlerLogEntries"):
self.loggingEntries = handler.dbHandlerLogEntries()
break
# must have a valid XBRL instance or document
if self.modelXbrl.modelDocument is None:
raise XPDBException("xpgDB:MissingXbrlDocument",
_("No XBRL instance or schema loaded for this filing."))
# obtain supplemental entity information
self.entityInformation = loadEntityInformation(self.modelXbrl, entrypoint, rssItem)
# identify table facts (table datapoints) (prior to the locked database transaction)
self.tableFacts = tableFacts(self.modelXbrl) # for EFM & HMRC this is ( (roleType, table_code, fact) )
loadPrimaryDocumentFacts(self.modelXbrl, rssItem, self.entityInformation) # load primary document facts for SEC filing
self.identifyTaxonomyRelSetsOwner()
# at this point we determine what's in the database and provide new tables
# requires locking most of the table structure
self.lockTables(('entity', 'filing', 'report', 'document', 'referenced_documents'),
isSessionTransaction=True) # lock for whole transaction
# find pre-existing documents in server database
self.identifyPreexistingDocuments()
self.identifyAspectsUsed()
self.dropTemporaryTable()
startedAt = time.time()
self.syncSequences = True # for database types that don't explicitly handle sequences
self.insertFiling(rssItem)
self.modelXbrl.profileStat(_("XbrlSqlDB: Filing insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertDocuments()
self.modelXbrl.profileStat(_("XbrlSqlDB: Documents insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertAspects()
self.modelXbrl.profileStat(_("XbrlSqlDB: Aspects insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertArcroleTypes()
self.insertRoleTypes()
self.modelXbrl.profileStat(_("XbrlSqlDB: Role Types insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertResources()
self.modelXbrl.profileStat(_("XbrlSqlDB: Resources insertion"), time.time() - startedAt)
startedAt = time.time()
# self.modelXbrl.profileStat(_("XbrlSqlDB: DTS insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertDataPoints()
self.modelXbrl.profileStat(_("XbrlSqlDB: instance insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertRelationships() # must follow data points for footnote relationships
self.modelXbrl.profileStat(_("XbrlSqlDB: Relationships insertion"), time.time() - startedAt)
startedAt = time.time()
self.insertValidationResults()
self.modelXbrl.profileStat(_("XbrlSqlDB: Validation results insertion"), time.time() - startedAt)
startedAt = time.time()
self.showStatus("Committing entries")
self.commit()
self.modelXbrl.profileStat(_("XbrlSqlDB: insertion committed"), time.time() - startedAt)
self.showStatus("DB insertion completed", clearAfter=5000)
except Exception as ex:
self.showStatus("DB insertion failed due to exception", clearAfter=5000)
raise
def identifyTaxonomyRelSetsOwner(self):
# walk down referenced document set from instance to find 'lowest' taxonomy relationship set ownership
instanceReferencedDocuments = set()
instanceDocuments = set()
inlineXbrlDocSet = None
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
instanceDocuments.add(mdlDoc)
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include"):
instanceReferencedDocuments.add(refDoc)
elif mdlDoc.type == Type.INLINEXBRLDOCUMENTSET:
inlineXbrlDocSet = mdlDoc
if len(instanceReferencedDocuments) > 1:
# filing must own the taxonomy set
if len(instanceDocuments) == 1:
self.taxonomyRelSetsOwner = instanceDocuments.pop()
elif inlineXbrlDocSet is not None: # manifest for inline docs can own the rel sets
self.taxonomyRelSetsOwner = inlineXbrlDocSet
else: # no single instance, pick the entry point document
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument # entry document (instance or inline doc set)
elif len(instanceReferencedDocuments) == 1:
self.taxonomyRelSetsOwner = instanceReferencedDocuments.pop()
elif self.modelXbrl.modelDocument.type == Type.SCHEMA:
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
else:
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
instanceReferencedDocuments.clear() # dereference
instanceDocuments.clear()
# check whether relationship_set is completely in instance or part/all in taxonomy
self.arcroleInInstance = {}
self.arcroleHasResource = {}
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-"):
inInstance = False
hasResource = False
for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
if (not inInstance and
rel.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL) and
any(isinstance(tgtObj, ModelObject) and tgtObj.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL)
for tgtObj in (rel.fromModelObject, rel.toModelObject))):
inInstance = True
if not hasResource and any(isinstance(resource, ModelResource)
for resource in (rel.fromModelObject, rel.toModelObject)):
hasResource = True
if inInstance and hasResource:
break
self.arcroleInInstance[arcrole] = inInstance
self.arcroleHasResource[arcrole] = hasResource
def initializeBatch(self, rssObject):
results = self.execute("SELECT filing_number, accepted_timestamp FROM filing")
existingFilings = dict((filingNumber, timestamp)
for filingNumber, timestamp in results) # timestamp is a string
for rssItem in rssObject.rssItems:
if (rssItem.accessionNumber in existingFilings and
rssItem.acceptanceDatetime == existingFilings[rssItem.accessionNumber]):
rssItem.skipRssItem = True
def insertFiling(self, rssItem):
now = datetime.datetime.now()
entityInfo = self.entityInformation
def rssItemGet(propertyName):
if rssItem is not None:
return getattr(rssItem, propertyName, None)
return None
self.showStatus("insert entity")
LEI = None
entity_comparator = ('legal_entity_number', 'file_number') if LEI else ('file_number',)
table = self.getTable('entity', 'entity_id',
('legal_entity_number',
'file_number',
'reference_number', # CIK
'tax_number',
'standard_industry_code',
'name',
'legal_state',
'phone',
'phys_addr1', 'phys_addr2', 'phys_city', 'phys_state', 'phys_zip', 'phys_country',
'mail_addr1', 'mail_addr2', 'mail_city', 'mail_state', 'mail_zip', 'mail_country',
'fiscal_year_end',
'filer_category',
'public_float',
'trading_symbol'),
entity_comparator, # cannot compare None = None if LEI is absent, always False
((LEI,
rssItemGet("fileNumber") or entityInfo.get("file-number") or str(int(time.time())),
rssItemGet("cikNumber") or entityInfo.get("cik"),
entityInfo.get("irs-number"),
rssItemGet("assignedSic") or entityInfo.get("assigned-sic") or -1,
rssItemGet("companyName") or entityInfo.get("conformed-name"),
entityInfo.get("state-of-incorporation"),
entityInfo.get("business-address.phone"),
entityInfo.get("business-address.street1"),
entityInfo.get("business-address.street2"),
entityInfo.get("business-address.city"),
entityInfo.get("business-address.state"),
entityInfo.get("business-address.zip"),
countryOfState.get(entityInfo.get("business-address.state")),
entityInfo.get("mail-address.street1"),
entityInfo.get("mail-address.street2"),
entityInfo.get("mail-address.city"),
entityInfo.get("mail-address.state"),
entityInfo.get("mail-address.zip"),
countryOfState.get(entityInfo.get("mail-address.state")),
rssItemGet("fiscalYearEnd") or entityInfo.get("fiscal-year-end"),
entityInfo.get("filer-category"),
entityInfo.get("public-float"),
entityInfo.get("trading-symbol")
),),
checkIfExisting=True,
returnExistenceStatus=True)
if LEI:
for id, _LEI, filing_number, existenceStatus in table:
self.entityId = id
self.entityPreviouslyInDB = existenceStatus
break
else:
for id, filing_number, existenceStatus in table:
self.entityId = id
self.entityPreviouslyInDB = existenceStatus
break
if any('former-conformed-name' in key for key in entityInfo.keys()):
self.getTable('former_entity', None,
('entity_id', 'former_name', 'date_changed'),
('entity_id', 'former_name', 'date_changed'),
((self.entityId,
entityInfo.get(keyPrefix + '.former-conformed-name'),
entityInfo.get(keyPrefix + '.date-changed'))
for key in entityInfo.keys() if 'former-conformed-name' in key
for keyPrefix in (key.partition('.')[0],)),
checkIfExisting=True)
self.showStatus("insert filing")
table = self.getTable('filing', 'filing_id',
('filing_number', 'form_type', 'entity_id', 'reference_number',
'accepted_timestamp', 'is_most_current', 'filing_date',
'creation_software',
'authority_html_url', 'entry_url', ),
('filing_number',),
((rssItemGet("accessionNumber") or entityInfo.get("accession-number") or str(int(time.time())), # NOT NULL
rssItemGet("formType") or entityInfo.get("form-type"),
self.entityId,
rssItemGet("cikNumber") or entityInfo.get("cik"),
rssItemGet("acceptanceDatetime") or entityInfo.get("acceptance-datetime") or now,
True,
rssItemGet("filingDate") or entityInfo.get("filing-date") or now, # NOT NULL
self.modelXbrl.modelDocument.creationSoftware,
rssItemGet("htmlUrl") or entityInfo.get("primary-document-url"),
rssItemGet("url") or entityInfo.get("instance-url")
),),
checkIfExisting=True,
returnExistenceStatus=True)
for id, filing_number, existenceStatus in table:
self.filingId = id
self.filingPreviouslyInDB = existenceStatus
break
self.showStatus("insert report")
table = self.getTable('report', 'report_id',
('filing_id', ),
('filing_id',),
((self.filingId,
),),
checkIfExisting=True,
returnExistenceStatus=True)
for id, foundFilingId, existenceStatus in table:
self.reportId = id
self.filingPreviouslyInDB = existenceStatus
break
def isSemanticDocument(self, modelDocument):
if modelDocument.type == Type.SCHEMA:
# must include document items taxonomy even if not in DTS
return modelDocument.inDTS or modelDocument.targetNamespace == "http://arelle.org/doc/2014-01-31"
return modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.LINKBASE)
def identifyPreexistingDocuments(self):
self.existingDocumentIds = {}
self.urlDocs = {}
docUris = set()
for modelDocument in self.modelXbrl.urlDocs.values():
url = ensureUrl(modelDocument.uri)
self.urlDocs[url] = modelDocument
if self.isSemanticDocument(modelDocument):
docUris.add(self.dbStr(url))
if docUris:
results = self.execute("SELECT document_id, document_url FROM {} WHERE document_url IN ({})"
.format(self.dbTableName("document"),
', '.join(docUris)))
self.existingDocumentIds = dict((self.urlDocs[self.pyStrFromDbStr(docUrl)],docId)
for docId, docUrl in results)
# identify whether taxonomyRelsSetsOwner is existing
self.isExistingTaxonomyRelSetsOwner = (
self.taxonomyRelSetsOwner.type not in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET) and
self.taxonomyRelSetsOwner in self.existingDocumentIds)
def identifyAspectsUsed(self):
# relationshipSets are a dts property
self.relationshipSets = [(arcrole, ELR, linkqname, arcqname)
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR and (arcrole.startswith("XBRL-") or (linkqname and arcqname))]
aspectsUsed = set(f.concept
for f in self.modelXbrl.factsInInstance)
for cntx in self.modelXbrl.contexts.values():
for dim in cntx.qnameDims.values():
aspectsUsed.add(dim.dimension)
if dim.isExplicit:
aspectsUsed.add(dim.member)
else:
aspectsUsed.add(self.modelXbrl.qnameConcepts[dim.typedMember.qname])
for defaultDimQn, defaultDimMemberQn in self.modelXbrl.qnameDimensionDefaults.items():
aspectsUsed.add(self.modelXbrl.qnameConcepts[defaultDimQn])
aspectsUsed.add(self.modelXbrl.qnameConcepts[defaultDimMemberQn])
for relationshipSetKey in self.relationshipSets:
relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
for rel in relationshipSet.modelRelationships:
if isinstance(rel.fromModelObject, ModelConcept):
aspectsUsed.add(rel.fromModelObject)
if isinstance(rel.toModelObject, ModelConcept):
aspectsUsed.add(rel.toModelObject)
try:
for qn in (XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit):
aspectsUsed.add(self.modelXbrl.qnameConcepts[qn])
except KeyError:
pass # no DTS
for roleTypes in (self.modelXbrl.roleTypes.values(), self.modelXbrl.arcroleTypes.values()):
for roleUriTypes in roleTypes:
for roleType in roleUriTypes:
for qn in roleType.usedOns:
if qn in self.modelXbrl.qnameConcepts: # qname may be undefined or invalid and still 2.1 legal
aspectsUsed.add(self.modelXbrl.qnameConcepts[qn])
# add aspects referenced by logging entries
for logEntry in self.loggingEntries:
for ref in logEntry['refs']:
modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
if isinstance(modelObject, ModelConcept) and modelObject.modelDocument.inDTS:
aspectsUsed.add(modelObject)
# add substitution groups
aspectsUsed |= set(aspect.substitutionGroup
for aspect in aspectsUsed
if aspect is not None)
aspectsUsed -= {None} # remove None if in aspectsUsed
self.aspectsUsed = aspectsUsed
typesUsed = set()
def typeUsed(modelType):
if modelType is not None and modelType.modelDocument.inDTS: # exclude nonDTS types (schema, etc)
typesUsed.add(modelType)
typesDerivedFrom = modelType.typeDerivedFrom
if isinstance(typesDerivedFrom, list): # union derivation
for typeDerivedFrom in typesDerivedFrom:
if typeDerivedFrom not in typesUsed:
typeUsed(typeDerivedFrom)
else: # single derivation
if typesDerivedFrom is not None and typesDerivedFrom not in typesUsed:
typeUsed(typesDerivedFrom)
for aspect in aspectsUsed:
modelType = aspect.type
if modelType is not None:
if modelType not in typesUsed:
typeUsed(modelType)
self.typesUsed = typesUsed
def insertDocuments(self):
|
def insertAspects(self):
self.showStatus("insert aspects")
# determine new filing documents and types they use
filingDocumentAspects = set()
existingDocumentUsedAspects = set()
for concept in self.modelXbrl.qnameConcepts.values():
if concept.modelDocument not in self.existingDocumentIds:
filingDocumentAspects.add(concept)
filingDocumentAspectType = concept.type
if filingDocumentAspectType is not None and filingDocumentAspectType not in self.typesUsed:
self.typesUsed.add(filingDocumentAspectType)
elif concept in self.aspectsUsed:
existingDocumentUsedAspects.add(concept)
filingDocumentTypes = set()
existingDocumentUsedTypes = set()
for modelType in self.modelXbrl.qnameTypes.values():
if modelType.modelDocument not in self.existingDocumentIds:
filingDocumentTypes.add(modelType)
elif modelType in self.typesUsed:
existingDocumentUsedTypes.add(modelType)
# get existing element IDs
self.typeQnameId = {}
if existingDocumentUsedTypes:
typeQnameIds = []
table = self.getTable('data_type', 'data_type_id',
('document_id', 'qname',),
('document_id', 'qname',),
tuple((self.documentIds[modelType.modelDocument],
modelType.qname.clarkNotation)
for modelType in existingDocumentUsedTypes
if modelType.modelDocument in self.documentIds),
checkIfExisting=True,
insertIfNotMatched=False)
for typeId, docId, qn in table:
self.typeQnameId[qname(qn)] = typeId
table = self.getTable('data_type', 'data_type_id',
('document_id', 'xml_id', 'xml_child_seq',
'qname', 'name', 'base_type', 'derived_from_type_id'),
('document_id', 'qname',),
tuple((self.documentIds[modelType.modelDocument],
modelType.id,
elementChildSequence(modelType),
modelType.qname.clarkNotation,
modelType.name,
modelType.baseXsdType,
self.typeQnameId.get(modelType.typeDerivedFrom)
if isinstance(modelType.typeDerivedFrom, ModelType) else None)
for modelType in filingDocumentTypes
if modelType.modelDocument in self.documentIds)
)
for typeId, docId, qn in table:
self.typeQnameId[qname(qn)] = typeId
updatesToDerivedFrom = set()
for modelType in filingDocumentTypes:
if isinstance(modelType.typeDerivedFrom, ModelType):
typeDerivedFrom = modelType.typeDerivedFrom
if (typeDerivedFrom in filingDocumentTypes and
modelType.qname in self.typeQnameId and
typeDerivedFrom.qname in self.typeQnameId):
updatesToDerivedFrom.add( (self.typeQnameId[modelType.qname],
self.typeQnameId[typeDerivedFrom.qname]) )
# update derivedFrom's of newly added types
if updatesToDerivedFrom:
self.updateTable('data_type',
('data_type_id', 'derived_from_type_id'),
updatesToDerivedFrom)
existingDocumentUsedTypes.clear() # dereference
filingDocumentTypes.clear() # dereference
self.aspectQnameId = {}
# get existing element IDs
if existingDocumentUsedAspects:
table = self.getTable('aspect', 'aspect_id',
('document_id', 'qname',),
('document_id', 'qname',),
tuple((self.documentIds[concept.modelDocument],
concept.qname.clarkNotation)
for concept in existingDocumentUsedAspects
if concept.modelDocument in self.documentIds),
checkIfExisting=True,
insertIfNotMatched=False)
for aspectId, docId, qn in table:
self.aspectQnameId[qname(qn)] = aspectId
aspects = []
for concept in filingDocumentAspects:
niceType = concept.niceType
if niceType is not None and len(niceType) > 128:
niceType = niceType[:128]
if concept.modelDocument in self.documentIds:
aspects.append((self.documentIds[concept.modelDocument],
concept.id,
elementChildSequence(concept),
concept.qname.clarkNotation,
concept.name,
self.typeQnameId.get(concept.typeQname),
niceType[:128] if niceType is not None else None,
self.aspectQnameId.get(concept.substitutionGroupQname),
concept.balance,
concept.periodType,
concept.isAbstract,
concept.isNillable,
concept.isNumeric,
concept.isMonetary,
concept.isTextBlock))
table = self.getTable('aspect', 'aspect_id',
('document_id', 'xml_id', 'xml_child_seq',
'qname', 'name', 'datatype_id', 'base_type', 'substitution_group_aspect_id',
'balance', 'period_type', 'abstract', 'nillable',
'is_numeric', 'is_monetary', 'is_text_block'),
('document_id', 'qname'),
aspects
)
for aspectId, docId, qn in table:
self.aspectQnameId[qname(qn)] = aspectId
updatesToSubstitutionGroup = set()
for concept in filingDocumentAspects:
if concept.substitutionGroup in filingDocumentAspects and concept.modelDocument in self.documentIds:
updatesToSubstitutionGroup.add( (self.aspectQnameId[concept.qname],
self.aspectQnameId.get(concept.substitutionGroupQname)) )
        # update substitution groups of newly added aspects
if updatesToSubstitutionGroup:
self.updateTable('aspect',
('aspect_id', 'substitution_group_aspect_id'),
updatesToSubstitutionGroup)
filingDocumentAspects.clear() # dereference
existingDocumentUsedAspects.clear() # dereference
def insertArcroleTypes(self):
self.showStatus("insert arcrole types")
# add existing arcrole types
        arcroleTypesByIds = set((self.documentIds[arcroleType.modelDocument],
                                 arcroleType.arcroleURI) # key on docId, arcrole URI
                                for arcroleTypes in self.modelXbrl.arcroleTypes.values()
                                for arcroleType in arcroleTypes
                                if arcroleType.modelDocument in self.existingDocumentIds)
table = self.getTable('arcrole_type', 'arcrole_type_id',
('document_id', 'arcrole_uri'),
('document_id', 'arcrole_uri'),
tuple((arcroleTypeIDs[0], # doc Id
arcroleTypeIDs[1] # uri Id
)
for arcroleTypeIDs in arcroleTypesByIds),
checkIfExisting=True,
insertIfNotMatched=False)
self.arcroleTypeIds = {}
for arcroleId, docId, uri in table:
self.arcroleTypeIds[(docId, uri)] = arcroleId
        # arcrole types in newly added documents
arcroleTypesByIds = dict(((self.documentIds[arcroleType.modelDocument],
arcroleType.arcroleURI), # key on docId, uriId
arcroleType) # value is roleType object
for arcroleTypes in self.modelXbrl.arcroleTypes.values()
for arcroleType in arcroleTypes
if arcroleType.modelDocument not in self.existingDocumentIds)
table = self.getTable('arcrole_type', 'arcrole_type_id',
('document_id', 'xml_id', 'xml_child_seq', 'arcrole_uri', 'cycles_allowed', 'definition'),
('document_id', 'arcrole_uri'),
tuple((arcroleTypeIDs[0], # doc Id
arcroleType.id,
elementChildSequence(arcroleType),
arcroleType.arcroleURI,
arcroleType.cyclesAllowed,
arcroleType.definition)
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()))
for arcroleId, docId, uri in table:
self.arcroleTypeIds[(docId, uri)] = arcroleId
table = self.getTable('used_on',
None, # no record id in this table
('object_id', 'aspect_id'),
('object_id', 'aspect_id'),
tuple((self.arcroleTypeIds[(arcroleTypeIDs[0], arcroleType.arcroleURI)],
self.aspectQnameId[usedOnQn])
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()
for usedOnQn in arcroleType.usedOns
if usedOnQn in self.aspectQnameId),
checkIfExisting=True)
def insertRoleTypes(self):
self.showStatus("insert role types")
# add existing role types
roleTypesByIds = set((self.documentIds[roleType.modelDocument],
roleType.roleURI) # key on docId, uriId
for roleTypes in self.modelXbrl.roleTypes.values()
for roleType in roleTypes
if roleType.modelDocument in self.existingDocumentIds)
table = self.getTable('role_type', 'role_type_id',
('document_id', 'role_uri'),
('document_id', 'role_uri'),
tuple((roleTypeIDs[0], # doc Id
roleTypeIDs[1] # uri Id
)
for roleTypeIDs in roleTypesByIds),
checkIfExisting=True,
insertIfNotMatched=False)
self.roleTypeIds = {}
for roleId, docId, uri in table:
self.roleTypeIds[(docId, uri)] = roleId
# new document role types
roleTypesByIds = dict(((self.documentIds[roleType.modelDocument],
roleType.roleURI), # key on docId, uriId
roleType) # value is roleType object
for roleTypes in self.modelXbrl.roleTypes.values()
for roleType in roleTypes
if roleType.modelDocument not in self.existingDocumentIds)
table = self.getTable('role_type', 'role_type_id',
('document_id', 'xml_id', 'xml_child_seq', 'role_uri', 'definition'),
('document_id', 'role_uri'),
tuple((roleTypeIDs[0], # doc Id
roleType.id,
elementChildSequence(roleType),
roleTypeIDs[1], # uri Id
roleType.definition)
for roleTypeIDs, roleType in roleTypesByIds.items()))
for roleId, docId, uri in table:
self.roleTypeIds[(docId, uri)] = roleId
table = self.getTable('used_on',
None, # no record id in this table
('object_id', 'aspect_id'),
('object_id', 'aspect_id'),
tuple((self.roleTypeIds[(roleTypeIDs[0], roleType.roleURI)],
self.aspectQnameId[usedOnQn])
for roleTypeIDs, roleType in roleTypesByIds.items()
for usedOnQn in roleType.usedOns
if usedOnQn in self.aspectQnameId),
checkIfExisting=True)
def insertResources(self):
self.showStatus("insert resources")
# deduplicate resources (may be on multiple arcs)
arcroles = [arcrole
# check whether relationship_set is completely in instance or part/all in taxonomy
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-")
and self.arcroleHasResource[arcrole]
and (self.arcroleInInstance[arcrole] or not self.isExistingTaxonomyRelSetsOwner)]
# note that lxml has no column numbers, use objectIndex as pseudo-column number
uniqueResources = dict(((self.documentIds[resource.modelDocument],
resource.objectIndex), resource)
for arcrole in arcroles
for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships
for resource in (rel.fromModelObject, rel.toModelObject)
if isinstance(resource, ModelResource))
table = self.getTable('resource', 'resource_id',
('document_id', 'xml_id', 'xml_child_seq', 'qname', 'role', 'value', 'xml_lang'),
('document_id', 'xml_child_seq'),
tuple((self.documentIds[resource.modelDocument],
resource.id,
elementChildSequence(resource),
resource.qname.clarkNotation,
resource.role,
resource.textValue,
resource.xmlLang)
for resource in uniqueResources.values()),
checkIfExisting=True)
self.resourceId = dict(((docId, xml_child_seq), id)
for id, docId, xml_child_seq in table)
uniqueResources.clear()
def modelObjectId(self, modelObject):
if isinstance(modelObject, ModelConcept):
return self.aspectQnameId.get(modelObject.qname)
elif isinstance(modelObject, ModelType):
return self.aspectTypeIds.get(modelObject.qname)
elif isinstance(modelObject, ModelResource):
return self.resourceId.get((self.documentIds[modelObject.modelDocument],
elementChildSequence(modelObject)))
elif isinstance(modelObject, ModelFact):
return self.factDataPointId.get((self.documentIds[modelObject.modelDocument],
elementChildSequence(modelObject)))
else:
return None
def insertRelationships(self):
self.showStatus("insert relationship sets")
table = self.getTable('relationship_set', 'relationship_set_id',
('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
tuple((self.documentIds[self.modelXbrl.modelDocument if self.arcroleInInstance[arcrole]
else self.taxonomyRelSetsOwner],
ELR,
arcrole,
linkqname.clarkNotation,
arcqname.clarkNotation)
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
if ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])))
self.relSetId = dict(((linkRole, arcRole, lnkQn, arcQn), id)
for id, document_id, linkRole, arcRole, lnkQn, arcQn in table)
# do tree walk to build relationships with depth annotated, no targetRole navigation
dbRels = []
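        # walkTree: preorder depth-first traversal of one relationship set, tagging
        # each arc with a running sequence number and its tree depth; the visited set
        # holds the arcs on the current path, so directed cycles terminate instead of
        # recursing forever (an arc becomes usable again once recursion backs out of it).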
def walkTree(rels, seq, depth, relationshipSet, visited, dbRels, relSetId):
for rel in rels:
if rel not in visited and isinstance(rel.toModelObject, ModelObject):
visited.add(rel)
dbRels.append((rel, seq, depth, relSetId))
seq += 1
seq = walkTree(relationshipSet.fromModelObject(rel.toModelObject), seq, depth+1, relationshipSet, visited, dbRels, relSetId)
visited.remove(rel)
return seq
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
if (ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])):
relSetId = self.relSetId[(ELR,
arcrole,
linkqname.clarkNotation,
arcqname.clarkNotation)]
relationshipSet = self.modelXbrl.relationshipSet(arcrole, ELR, linkqname, arcqname)
seq = 1
for rootConcept in relationshipSet.rootConcepts:
seq = walkTree(relationshipSet.fromModelObject(rootConcept), seq, 1, relationshipSet, set(), dbRels, relSetId)
        def resourceResourceId(resource):
            if isinstance(resource, ModelResource):
                return self.resourceId.get((self.documentIds[resource.modelDocument],
                                            elementChildSequence(resource)))
            else:
                return None
table = self.getTable('relationship', 'relationship_id',
('document_id', 'xml_id', 'xml_child_seq',
'relationship_set_id', 'reln_order',
'from_id', 'to_id', 'calculation_weight',
'tree_sequence', 'tree_depth', 'preferred_label_role'),
('relationship_set_id', 'document_id', 'xml_child_seq'),
tuple((self.documentIds[rel.modelDocument],
rel.id,
elementChildSequence(rel.arcElement),
relSetId,
self.dbNum(rel.order),
self.modelObjectId(rel.fromModelObject),
self.modelObjectId(rel.toModelObject),
self.dbNum(rel.weight), # none if no weight
sequence,
depth,
rel.preferredLabel)
for rel, sequence, depth, relSetId in dbRels
if isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
self.relationshipId = dict(((docId,xml_child_seq), relationshipId)
for relationshipId, relSetId, docId, xml_child_seq in table)
table = self.getTable('root', None,
('relationship_set_id', 'relationship_id'),
('relationship_set_id', 'relationship_id'),
tuple((relSetId,
self.relationshipId[self.documentIds[rel.modelDocument],
elementChildSequence(rel.arcElement)])
for rel, sequence, depth, relSetId in dbRels
if depth == 1 and
isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
        del dbRels[:] # dereference
def insertDataPoints(self):
reportId = self.reportId
if self.filingPreviouslyInDB:
self.showStatus("deleting prior data points of this report")
# remove prior facts
self.lockTables(("data_point", "entity_identifier", "period", "aspect_value_selection",
"aspect_value_selection_set", "unit_measure", "unit",
"table_data_points"))
self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
.format( self.dbTableName("data_point"), reportId),
close=False, fetch=False)
self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
.format( self.dbTableName("entity_identifier"), reportId),
close=False, fetch=False)
self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
.format( self.dbTableName("period"), reportId),
close=False, fetch=False)
self.execute("DELETE from {0} "
"USING {1} "
"WHERE {1}.report_id = {2} AND {0}.aspect_value_selection_id = {1}.aspect_value_selection_id"
.format( self.dbTableName("aspect_value_selection"),
self.dbTableName("aspect_value_selection_set"),
reportId),
close=False, fetch=False)
self.execute("DELETE FROM {0} WHERE {0}.report_id = {1};"
.format( self.dbTableName("aspect_value_selection_set"), reportId),
close=False, fetch=False)
self.execute("DELETE from {0} "
"USING {1} "
"WHERE {1}.report_id = {2} AND {0}.unit_id = {1}.unit_id"
.format( self.dbTableName("unit_measure"),
self.dbTableName("unit"),
reportId),
close=False, fetch=False)
self.execute("DELETE from {0} WHERE {0}.report_id = {1}"
.format( self.dbTableName("unit"), reportId),
close=False, fetch=False)
self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
.format( self.dbTableName("table_data_points"), reportId),
close=False, fetch=False)
self.showStatus("insert data points")
# units
table = self.getTable('unit', 'unit_id',
('report_id', 'xml_id', 'xml_child_seq', 'measures_hash'),
('report_id', 'measures_hash'),
tuple((reportId,
unit.id,
elementChildSequence(unit),
unit.md5hash)
for unit in dict((unit.md5hash,unit) # deduplicate by md5hash
for unit in self.modelXbrl.units.values()).values()))
self.unitId = dict(((_reportId, measuresHash), id)
for id, _reportId, measuresHash in table)
# measures
table = self.getTable('unit_measure',
None,
('unit_id', 'qname', 'is_multiplicand'),
('unit_id', 'qname', 'is_multiplicand'),
tuple((self.unitId[(reportId,unit.md5hash)],
measure.clarkNotation,
i == 0)
for unit in self.modelXbrl.units.values()
for i in range(2)
for measure in unit.measures[i]))
table = self.getTable('entity_identifier', 'entity_identifier_id',
('report_id', 'scheme', 'identifier'),
('report_id', 'scheme', 'identifier'),
set((reportId,
cntx.entityIdentifier[0],
cntx.entityIdentifier[1])
for cntx in self.modelXbrl.contexts.values()),
checkIfExisting=True) # entities shared across multiple instance/inline docs
self.entityIdentifierId = dict(((_reportId, entScheme, entIdent), id)
for id, _reportId, entScheme, entIdent in table)
table = self.getTable('period', 'period_id',
('report_id', 'start_date', 'end_date', 'is_instant', 'is_forever'),
('report_id', 'start_date', 'end_date', 'is_instant', 'is_forever'),
set((reportId,
cntx.startDatetime if cntx.isStartEndPeriod else None,
cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
cntx.isInstantPeriod,
cntx.isForeverPeriod)
for cntx in self.modelXbrl.contexts.values()),
checkIfExisting=True) # periods shared across multiple instance/inline docs
self.periodId = dict(((_reportId, start, end, isInstant, isForever), id)
for id, _reportId, start, end, isInstant, isForever in table)
def cntxDimsSet(cntx):
return frozenset((self.aspectQnameId[modelDimValue.dimensionQname],
self.aspectQnameId.get(modelDimValue.memberQname),
modelDimValue.isTyped,
modelDimValue.stringValue if modelDimValue.isTyped else None)
for modelDimValue in cntx.qnameDims.values()
if modelDimValue.dimensionQname in self.aspectQnameId)
cntxAspectValueSelectionSet = dict((cntx, cntxDimsSet(cntx))
for cntx in self.modelXbrl.contexts.values())
aspectValueSelections = set(aspectValueSelectionSet
for cntx, aspectValueSelectionSet in cntxAspectValueSelectionSet.items()
if aspectValueSelectionSet)
self.lockTables(("aspect_value_selection_set",))
self.execute("DELETE FROM {0} WHERE report_id = {1}"
.format(self.dbTableName("aspect_value_selection_set"), reportId),
close=False, fetch=False)
table = self.getTable('aspect_value_selection_set', 'aspect_value_selection_id',
('report_id', ),
('report_id', ),
tuple((reportId,)
for aspectValueSelection in aspectValueSelections)
)
# assure we only get single entry per result (above gives cross product)
table = self.execute("SELECT aspect_value_selection_id, report_id from {0} "
"WHERE report_id = {1}"
.format(self.dbTableName("aspect_value_selection_set"), reportId))
aspectValueSelectionSets = dict((aspectValueSelections.pop(), id)
for id, _reportId in table)
cntxAspectValueSelectionSetId = dict((cntx, aspectValueSelectionSets[_cntxDimsSet])
for cntx, _cntxDimsSet in cntxAspectValueSelectionSet.items()
if _cntxDimsSet)
table = self.getTable('aspect_value_selection',
None,
('aspect_value_selection_id', 'aspect_id', 'aspect_value_id', 'is_typed_value', 'typed_value'),
('aspect_value_selection_id', ),
tuple((aspectValueSetId, dimId, dimMbrId, isTyped, typedValue)
for aspectValueSelection, aspectValueSetId in aspectValueSelectionSets.items()
for dimId, dimMbrId, isTyped, typedValue in aspectValueSelection)
)
# facts
def insertFactSet(modelFacts, parentDatapointId):
facts = []
for fact in modelFacts:
if fact.concept is not None and getattr(fact, "xValid", UNVALIDATED) >= VALID and fact.qname is not None:
cntx = fact.context
documentId = self.documentIds[fact.modelDocument]
facts.append((reportId,
documentId,
fact.id,
elementChildSequence(fact),
fact.sourceline,
parentDatapointId, # parent ID
self.aspectQnameId.get(fact.qname),
fact.contextID,
self.entityIdentifierId.get((reportId, cntx.entityIdentifier[0], cntx.entityIdentifier[1]))
if cntx is not None else None,
self.periodId.get((reportId,
cntx.startDatetime if cntx.isStartEndPeriod else None,
cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
cntx.isInstantPeriod,
cntx.isForeverPeriod)) if cntx is not None else None,
cntxAspectValueSelectionSetId.get(cntx) if cntx is not None else None,
self.unitId.get((reportId,fact.unit.md5hash)) if fact.unit is not None else None,
fact.isNil,
fact.precision,
fact.decimals,
roundValue(fact.value, fact.precision, fact.decimals) if fact.isNumeric and not fact.isNil else None,
fact.value
))
table = self.getTable('data_point', 'datapoint_id',
('report_id', 'document_id', 'xml_id', 'xml_child_seq', 'source_line',
'parent_datapoint_id', # tuple
'aspect_id',
'context_xml_id', 'entity_identifier_id', 'period_id', 'aspect_value_selection_id', 'unit_id',
'is_nil', 'precision_value', 'decimals_value', 'effective_value', 'value'),
('document_id', 'xml_child_seq'),
facts)
xmlIdDataPointId = dict(((docId, xml_child_seq), datapointId)
for datapointId, docId, xml_child_seq in table)
self.factDataPointId.update(xmlIdDataPointId)
for fact in modelFacts:
if fact.isTuple:
try:
insertFactSet(fact.modelTupleFacts,
xmlIdDataPointId[(self.documentIds[fact.modelDocument],
elementChildSequence(fact))])
except KeyError:
self.modelXbrl.info("xpDB:warning",
_("Loading XBRL DB: tuple's datapoint not found: %(tuple)s"),
modelObject=fact, tuple=fact.qname)
self.factDataPointId = {}
insertFactSet(self.modelXbrl.facts, None)
# hashes
if self.tableFacts: # if any entries
tableDataPoints = []
for roleType, tableCode, fact in self.tableFacts:
try:
tableDataPoints.append((reportId,
self.roleTypeIds[(self.documentIds[roleType.modelDocument],
roleType.roleURI)],
tableCode,
self.factDataPointId[(self.documentIds[fact.modelDocument],
elementChildSequence(fact))]))
except KeyError:
# print ("missing table data points role or data point")
pass
table = self.getTable('table_data_points', None,
('report_id', 'object_id', 'table_code', 'datapoint_id'),
('report_id', 'object_id', 'datapoint_id'),
tableDataPoints)
def insertValidationResults(self):
reportId = self.reportId
if self.filingPreviouslyInDB:
self.showStatus("deleting prior messages of this report")
# remove prior messages for this report
self.lockTables(("message", "message_reference"))
self.execute("DELETE from {0} "
"USING {1} "
"WHERE {1}.report_id = {2} AND {1}.message_id = {0}.message_id"
.format(self.dbTableName("message_reference"),
self.dbTableName("message"),
reportId),
close=False, fetch=False)
self.execute("DELETE FROM {} WHERE message.report_id = {}"
.format(self.dbTableName("message"),reportId),
close=False, fetch=False)
messages = []
messageRefs = defaultdict(set) # direct link to objects
for i, logEntry in enumerate(self.loggingEntries):
sequenceInReport = i+1
for ref in logEntry['refs']:
modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
# for now just find a concept
objectId = None
if isinstance(modelObject, ModelFact):
objectId = self.factDataPointId.get((self.documentIds.get(modelObject.modelDocument),
elementChildSequence(modelObject)))
elif isinstance(modelObject, ModelRelationship):
objectId = self.relSetId.get((modelObject.linkrole,
modelObject.arcrole,
modelObject.linkQname.clarkNotation,
modelObject.arcElement.qname.clarkNotation))
elif isinstance(modelObject, ModelConcept):
objectId = self.aspectQnameId.get(modelObject.qname)
elif isinstance(modelObject, ModelXbrl):
objectId = reportId
elif hasattr(modelObject, "modelDocument"):
objectId = self.documentIds.get(modelObject.modelDocument)
if objectId is not None:
messageRefs[sequenceInReport].add(objectId)
messages.append((reportId,
sequenceInReport,
logEntry['code'],
logEntry['level'],
logEntry['message']['text']))
if messages:
self.showStatus("insert validation messages")
table = self.getTable('message', 'message_id',
('report_id', 'sequence_in_report', 'message_code', 'message_level', 'value'),
('report_id', 'sequence_in_report'),
messages)
messageIds = dict((sequenceInReport, messageId)
for messageId, _reportId, sequenceInReport in table)
table = self.getTable('message_reference', None,
('message_id', 'object_id'),
('message_id', 'object_id'),
tuple((messageId,
objectId)
for sequenceInReport, objectIds in messageRefs.items()
for objectId in objectIds
for messageId in (messageIds[sequenceInReport],)))
countryOfState = {
"AL": "US","AK": "US","AZ": "US","AR": "US","CA": "US","CO": "US", "CT": "US","DE": "US",
"FL": "US","GA": "US","HI": "US","ID": "US","IL": "US","IN": "US","IA": "US","KS": "US",
"KY": "US","LA": "US","ME": "US","MD": "US","MA": "US","MI": "US","MN": "US","MS": "US",
"MO": "US","MT": "US","NE": "US","NV": "US","NH": "US","NJ": "US","NM": "US","NY": "US",
"NC": "US","ND": "US","OH": "US","OK": "US","OR": "US","PA": "US","RI": "US","SC": "US",
"SD": "US","TN": "US","TX": "US","UT": "US","VT": "US","VA": "US","WA": "US","WV": "US",
"WI": "US","WY": "US","DC": "US","PR": "US","VI": "US","AS": "US","GU": "US","MP": "US",
"AB": "CA","BC": "CA","MB": "CA","NB": "CA","NL": "CA","NS": "CA","ON": "CA","PE": "CA",
"QC": "CA","SK": "CA","NT": "CA","NU": "CA","YT": "CA"}
| self.showStatus("insert documents")
table = self.getTable('document', 'document_id',
('document_url', 'document_type', 'namespace'),
('document_url',),
set((ensureUrl(docUrl),
Type.typeName[mdlDoc.type],
mdlDoc.targetNamespace)
for docUrl, mdlDoc in self.modelXbrl.urlDocs.items()
if mdlDoc not in self.existingDocumentIds and
self.isSemanticDocument(mdlDoc)),
checkIfExisting=True)
self.documentIds = dict((self.urlDocs[self.pyStrFromDbStr(url)], id)
for id, url in table)
self.documentIds.update(self.existingDocumentIds)
referencedDocuments = set()
# instance documents are filing references
# update report with document references
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
referencedDocuments.add( (self.documentIds[mdlDoc], self.documentIds[refDoc] ))
table = self.getTable('referenced_documents',
None, # no id column in this table
('object_id','document_id'),
('object_id','document_id'),
referencedDocuments,
checkIfExisting=True)
instDocId = instSchemaDocId = agencySchemaDocId = stdSchemaDocId = None
mdlDoc = self.modelXbrl.modelDocument
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
instDocId = self.documentIds[mdlDoc]
# referenced doc may be extension schema
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType == "href" and refDoc in self.documentIds:
instSchemaDocId = self.documentIds[refDoc]
break
elif mdlDoc.type == Type.SCHEMA:
            instSchemaDocId = self.documentIds[mdlDoc]
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
if refDoc.type == Type.SCHEMA:
nsAuthority = authority(refDoc.targetNamespace, includeScheme=False)
nsPath = refDoc.targetNamespace.split('/')
if len(nsPath) > 2:
if ((nsAuthority in ("fasb.org", "xbrl.us") and nsPath[-2] == "us-gaap") or
(nsAuthority == "xbrl.ifrs.org" and nsPath[-1] in ("ifrs", "ifrs-full", "ifrs-smes"))):
stdSchemaDocId = self.documentIds[refDoc]
elif (nsAuthority == "xbrl.sec.gov" and nsPath[-2] == "rr"):
agencySchemaDocId = self.documentIds[refDoc]
self.updateTable("report",
("report_id", "report_data_doc_id", "report_schema_doc_id", "agency_schema_doc_id", "standard_schema_doc_id"),
((self.reportId, instDocId, instSchemaDocId, agencySchemaDocId, stdSchemaDocId),)
) |
train.py | # -*- coding: utf-8 -*-
'''
Author: TJUZQC
Date: 2020-10-26 10:26:51
LastEditors: TJUZQC
LastEditTime: 2020-11-20 19:23:55
Description: None
'''
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import yaml
from torch import optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from evaluation import eval_net
from models import ChooseModel, init_weights
from utils.dataset import BasicDataset
with open(os.path.join(sys.path[0], 'config', 'config.yaml')) as _conf_file:
    conf = yaml.load(_conf_file, Loader=yaml.FullLoader)
dir_img = conf['DATASET']['IMGS_DIR']
dir_mask = conf['DATASET']['MASKS_DIR']
dir_checkpoint = conf['MODEL']['CHECKPOINT_DIR']
def train_net(net,
device,
epochs=5,
batch_size=16,
lr=0.001,
val_percent=0.1,
save_cp=True,
img_scale=0.5,
use_apex=False,
optimizer='adam',
classes=2,
lr_scheduler='steplr',
lr_scheduler_cfgs: dict = {'step_size': 10}):
dataset = BasicDataset(dir_img, dir_mask, img_scale,
train=True, classes=classes)
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
train, val = random_split(dataset, [n_train, n_val])
train_loader = DataLoader(
train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
val_loader = DataLoader(val, batch_size=batch_size,
shuffle=False, num_workers=8, pin_memory=True)
writer = SummaryWriter(
comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
global_step = 0
logging.info(f'''Starting training:
Epochs: {epochs}
Batch size: {batch_size}
Learning rate: {lr}
Training size: {n_train}
Validation size: {n_val}
Checkpoints: {save_cp}
Device: {device.type}
Images scaling: {img_scale}
Use apex: {use_apex}
''')
optimizers = {
'adadelta': optim.Adadelta,
'adagrad': optim.Adagrad,
'adam': optim.Adam,
'adamw': optim.AdamW,
'sparseadam': optim.SparseAdam,
'adamax': optim.Adamax,
'asgd': optim.ASGD,
'lbfgs': optim.LBFGS,
'rmsprop': optim.RMSprop,
'rprop': optim.Rprop,
'sgd': optim.SGD,
}
    optimizer_cls = optimizers.get(optimizer)
    assert optimizer_cls is not None, f'unknown optimizer: {optimizer}'
    optimizer = optimizer_cls(net.parameters(), lr=lr, weight_decay=1e-8)
lr_scheduler_getter = {
'lambdalr': torch.optim.lr_scheduler.LambdaLR,
'multiplicativelr': torch.optim.lr_scheduler.MultiplicativeLR,
'steplr': torch.optim.lr_scheduler.StepLR,
'multisteplr': torch.optim.lr_scheduler.MultiStepLR,
'exponentiallr': torch.optim.lr_scheduler.ExponentialLR,
'cosineannealinglr': torch.optim.lr_scheduler.CosineAnnealingLR,
'reducelronplateau': torch.optim.lr_scheduler.ReduceLROnPlateau,
'cycliclr': torch.optim.lr_scheduler.CyclicLR,
'onecyclelr': torch.optim.lr_scheduler.OneCycleLR,
}
    lr_scheduler_cls = lr_scheduler_getter.get(lr_scheduler.lower())
    assert lr_scheduler_cls is not None, f'unknown lr scheduler: {lr_scheduler}'
    lr_scheduler = lr_scheduler_cls(optimizer, **lr_scheduler_cfgs)
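    # A sketch of what the LR_SCHEDULER_CFGS entry in config.yaml might hold
    # (hypothetical values; the keys must match the chosen scheduler's signature):
    #   steplr:            {'step_size': 10, 'gamma': 0.1}
    #   cosineannealinglr: {'T_max': 50}
    #   reducelronplateau: {'mode': 'min', 'patience': 5}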
if use_apex:
try:
from apex import amp
net, optimizer = amp.initialize(net, optimizer, opt_level="O1")
except ImportError as e:
print(e)
use_apex = False
if net.n_classes > 1:
criterion = nn.CrossEntropyLoss()
else:
criterion = nn.BCEWithLogitsLoss()
for epoch in range(epochs):
net.train()
epoch_loss = 0
with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
for batch in train_loader:
imgs = batch['image']
true_masks = batch['mask']
assert imgs.shape[1] == net.n_channels, \
f'Network has been defined with {net.n_channels} input channels, ' \
f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
'the images are loaded correctly.'
imgs = imgs.to(device=device, dtype=torch.float32)
mask_type = torch.float32 if net.n_classes == 1 else torch.long
true_masks = true_masks.to(device=device, dtype=mask_type)
if net.n_classes > 1:
b, c, w, h = true_masks.shape
true_masks = true_masks.view(b, w, h)
masks_pred = net(imgs)
loss = criterion(masks_pred, true_masks)
epoch_loss += loss.item()
writer.add_scalar('Loss/train', loss.item(), global_step)
pbar.set_postfix(**{'loss (batch)': loss.item()})
optimizer.zero_grad()
if not use_apex:
loss.backward()
else:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
pbar.update(imgs.shape[0])
global_step += 1
                # evaluate on the validation set roughly 10 times per epoch
                val_interval = max(1, len(dataset) // (10 * batch_size))
                if global_step % val_interval == 0:
dice_coeff, pA, oA, precision, recall, f1score = eval_net(
net, val_loader, device, n_val)
if net.n_classes > 1:
logging.info(
'Validation cross entropy: {}'.format(dice_coeff))
writer.add_scalar('Loss/test', dice_coeff, global_step)
else:
logging.info(
'Validation Dice Coeff: {}'.format(dice_coeff))
writer.add_scalar('Dice/test', dice_coeff, global_step)
logging.info(
'Validation Pixel Accuracy: {}'.format(pA))
writer.add_scalar('pA/test', pA, global_step)
logging.info(
'Validation Overall Accuracy: {}'.format(oA))
writer.add_scalar('oA/test', oA, global_step)
logging.info(
'Validation Precision: {}'.format(precision))
writer.add_scalar('precision/test',
precision, global_step)
logging.info(
'Validation Recall: {}'.format(recall))
writer.add_scalar('recall/test', recall, global_step)
logging.info(
'Validation F1-score: {}'.format(f1score))
writer.add_scalar(
'F1-score/test', f1score, global_step)
writer.add_images('images', imgs, global_step)
if net.n_classes == 1:
writer.add_images(
'masks/true', true_masks, global_step)
writer.add_images(
'masks/pred', torch.sigmoid(masks_pred) > 0.5, global_step)
lr_scheduler.step()
if save_cp:
try:
os.mkdir(dir_checkpoint)
logging.info('Created checkpoint directory')
except OSError:
pass
torch.save(net.state_dict(),
os.path.join(dir_checkpoint, f'CP_epoch{epoch + 1}_loss_{str(loss.item())}.pth')) |
writer.close()
def get_args():
parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--network', metavar='NETWORK', type=str,
default=conf['MODEL']['MODEL_NAME'], help='network type', dest='network')
parser.add_argument('-e', '--epochs', metavar='E', type=int, default=conf['NUM_EPOCHS'],
help='Number of epochs', dest='epochs')
parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=conf['BATCH_SIZE'],
help='Batch size', dest='batchsize')
parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=conf['LR'],
help='Learning rate', dest='lr')
parser.add_argument('-f', '--load', dest='load', type=str, default=conf['MODEL']['PRETRAINED_MODEL_DIR'],
help='Load model from a .pth file')
parser.add_argument('-s', '--scale', dest='scale', type=float, default=conf['SCALE'],
help='Downscaling factor of the images')
parser.add_argument('-v', '--validation', dest='val', type=float, default=conf['VALIDATION'],
help='Percent of the data that is used as validation (0-100)')
parser.add_argument('-t', '--init-type', dest='init_type', type=str, default=conf['INIT_TYPE'],
help='Init weights type')
parser.add_argument('-a', '--use-apex', dest='use_apex', type=str, default=conf['APEX'],
help='Automatic Mixed Precision')
parser.add_argument('-o', '--optimizer', dest='optimizer',
type=str, default=conf['OPTIMIZER'], help='Optimizer type')
parser.add_argument('-ls', '--lr-scheduler', dest='lr_scheduler',
type=str, default=conf['LR_SCHEDULER'], help='lr scheduler type')
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: %(message)s')
args = get_args()
device = torch.device('cuda' if torch.cuda.is_available(
) and conf['DEVICE'].lower() == 'cuda' else 'cpu')
logging.info(f'Using device {device}')
network = args.network.lower()
# Change here to adapt to your data
# n_channels=3 for RGB images
# n_classes is the number of probabilities you want to get per pixel
# - For 1 class and background, use n_classes=1
# - For 2 classes, use n_classes=1
# - For N > 2 classes, use n_classes=N
net = ChooseModel(network)(
n_channels=3, n_classes=conf['DATASET']['NUM_CLASSES'])
    assert net is not None, 'check your argument --network'
logging.info(f'Network:\n'
f'\t{net.n_channels} input channels\n'
f'\t{net.n_classes} output channels (classes)\n'
f'\t{"Bilinear" if net.bilinear else "Dilated conv"} upscaling\n'
                 f'\tApex {"enabled" if args.use_apex == "True" else "disabled"}')
init_weights(net, args.init_type)
if args.load:
net.load_state_dict(
torch.load(args.load, map_location=device)
)
logging.info(f'Model loaded from {args.load}')
net.to(device=device)
# faster convolutions, but more memory
# cudnn.benchmark = True
try:
train_net(net=net,
epochs=args.epochs,
batch_size=args.batchsize,
lr=args.lr,
device=device,
img_scale=args.scale,
val_percent=args.val / 100,
use_apex=(args.use_apex == "True"),
optimizer=args.optimizer.lower(),
classes=conf['DATASET']['NUM_CLASSES'],
lr_scheduler=args.lr_scheduler,
lr_scheduler_cfgs=conf['LR_SCHEDULER_CFGS'])
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
            os._exit(0) | logging.info(
                f'Checkpoint {epoch + 1} saved! loss (batch) = {loss.item()}') |
mod.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under both the MIT license found in the
* LICENSE-MIT file in the root directory of this source tree and the Apache
* License, Version 2.0 found in the LICENSE-APACHE file in the root directory
* of this source tree.
*/
mod bytes_stream_future;
use std::cmp;
use std::io::{self, BufRead, Read};
use bytes_old::{BufMut, Bytes, BytesMut};
use futures::{try_ready, Async, Poll, Stream};
use tokio_io::codec::Decoder;
use tokio_io::AsyncRead;
pub use self::bytes_stream_future::BytesStreamFuture;
// 8KB is a reasonable default
const BUFSIZE: usize = 8 * 1024;
/// A structure that wraps a [Stream] of [Bytes] and lets it be accessed both
/// as a [Stream] and as [AsyncRead]. It is very useful when decoding a Stream
/// of Bytes asynchronously.
#[derive(Debug)]
pub struct BytesStream<S> {
bytes: BytesMut,
stream: S,
stream_done: bool,
}
impl<S: Stream<Item = Bytes>> BytesStream<S> {
/// Create a new instance of [BytesStream] wrapping the given [Stream] of [Bytes]
pub fn new(stream: S) -> Self {
BytesStream {
bytes: BytesMut::with_capacity(BUFSIZE),
stream,
stream_done: false,
}
}
/// Returns `true` if there are no more bytes left to be consumed
pub fn is_empty(&self) -> bool {
self.bytes.is_empty() && self.stream_done
}
    /// Consumes this combinator, returning the bytes that have been received but
    /// not yet consumed, together with the Stream that may yield more bytes
pub fn into_parts(self) -> (Bytes, S) {
(self.bytes.freeze(), self.stream)
}
/// Returns a future that yields a single decoded item from the Bytes of this
/// BytesStream (if any) and the remaining BytesStream.
pub fn into_future_decode<Dec>(self, decoder: Dec) -> BytesStreamFuture<S, Dec>
where
Dec: Decoder,
Dec::Error: From<S::Error>,
{
BytesStreamFuture::new(self, decoder)
}
/// Adds some bytes to the front of the BytesStream internal buffer. Those
/// bytes are ready to be read immediately after this function completes.
pub fn prepend_bytes(&mut self, bytes: Bytes) {
let mut bytes_mut = match bytes.try_mut() {
Ok(bytes_mut) => bytes_mut,
Err(bytes) => {
let cap = cmp::max(BUFSIZE, bytes.len() + self.bytes.len());
let mut bytes_mut = BytesMut::with_capacity(cap);
bytes_mut.put(bytes);
bytes_mut
}
};
bytes_mut.put(&self.bytes);
self.bytes = bytes_mut;
}
fn poll_buffer(&mut self) -> Poll<(), S::Error> {
if !self.stream_done {
let bytes = try_ready!(self.stream.poll());
match bytes {
None => self.stream_done = true,
Some(bytes) => self.bytes.extend_from_slice(&bytes),
}
}
Ok(Async::Ready(()))
}
fn poll_buffer_until(&mut self, len: usize) -> Poll<(), S::Error> {
while self.bytes.len() < len && !self.stream_done {
try_ready!(self.poll_buffer());
}
Ok(Async::Ready(()))
}
}
impl<S: Stream<Item = Bytes>> From<S> for BytesStream<S> {
fn from(stream: S) -> Self {
BytesStream::new(stream)
}
}
impl<S> Read for BytesStream<S>
where
S: Stream<Item = Bytes, Error = io::Error>,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let r#async = self.poll_buffer_until(buf.len())?;
if self.bytes.is_empty() && r#async.is_not_ready() {
Err(io::Error::new(
io::ErrorKind::WouldBlock,
"inner stream not ready",
))
} else {
let len = {
let slice = self.bytes.as_ref();
let len = cmp::min(buf.len(), slice.len());
if len == 0 {
return Ok(0);
}
let slice = &slice[..len];
let buf = &mut buf[..len];
buf.copy_from_slice(slice);
len
};
self.bytes.split_to(len);
Ok(len)
}
}
}
impl<S> AsyncRead for BytesStream<S> where S: Stream<Item = Bytes, Error = io::Error> {}
impl<S> BufRead for BytesStream<S>
where
S: Stream<Item = Bytes, Error = io::Error>,
{
fn fill_buf(&mut self) -> io::Result<&[u8]> {
if self.bytes.is_empty() && self.poll_buffer_until(1)?.is_not_ready() {
Err(io::Error::new(
io::ErrorKind::WouldBlock,
"inner stream not ready",
))
} else {
Ok(self.bytes.as_ref())
}
}
fn consume(&mut self, amt: usize) {
self.bytes.split_to(amt);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::BoxStream;
use crate::StreamExt;
use futures::stream::iter_ok;
fn | (in_reads: Vec<Vec<u8>>) -> BytesStream<BoxStream<Bytes, io::Error>> {
let stream = iter_ok(in_reads.into_iter().map(|v| v.into()));
BytesStream::new(stream.boxify())
}
fn do_read<S>(reader: &mut BytesStream<S>, len_to_read: usize) -> io::Result<Vec<u8>>
where
S: Stream<Item = Bytes, Error = io::Error>,
{
let mut out = vec![0; len_to_read];
let len_read = reader.read(&mut out)?;
out.truncate(len_read);
Ok(out)
}
#[test]
fn test_read_once() -> io::Result<()> {
let mut reader = make_reader(vec![vec![1, 2, 3, 4]]);
let out = do_read(&mut reader, 4)?;
assert_eq!(out, vec![1, 2, 3, 4]);
Ok(())
}
#[test]
fn test_read_join() -> io::Result<()> {
let mut reader = make_reader(vec![vec![1, 2], vec![3, 4]]);
let out = do_read(&mut reader, 4)?;
assert_eq!(out, vec![1, 2, 3, 4]);
Ok(())
}
#[test]
fn test_read_split() -> io::Result<()> {
let mut reader = make_reader(vec![vec![1, 2, 3, 4]]);
let out = do_read(&mut reader, 2)?;
assert_eq!(out, vec![1, 2]);
let out = do_read(&mut reader, 2)?;
assert_eq!(out, vec![3, 4]);
Ok(())
}
#[test]
fn test_read_eof() -> io::Result<()> {
let mut reader = make_reader(vec![vec![1, 2, 3]]);
let out = do_read(&mut reader, 4)?;
assert_eq!(out, vec![1, 2, 3]);
Ok(())
}
#[test]
fn test_read_no_data() -> io::Result<()> {
let mut reader = make_reader(vec![vec![1, 2, 3]]);
let out = do_read(&mut reader, 4)?;
assert_eq!(out, vec![1, 2, 3]);
let out = do_read(&mut reader, 1)?;
assert_eq!(out, vec![]);
Ok(())
}
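    // A sketch exercising prepend_bytes (assumes bytes 0.4's `Bytes: From<Vec<u8>>`):
    // bytes pushed to the front must be read back before anything from the stream.
    #[test]
    fn test_prepend_bytes() -> io::Result<()> {
        let mut reader = make_reader(vec![vec![3, 4]]);
        reader.prepend_bytes(Bytes::from(vec![1, 2]));
        let out = do_read(&mut reader, 4)?;
        assert_eq!(out, vec![1, 2, 3, 4]);
        Ok(())
    }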
}
| make_reader |
model_service.py | #!/usr/bin/env python3
import os
import argparse
import sys
import pickle
import asyncio
import time
import numpy as np
import zmq
import pytao
from p4p.nt import NTTable
from p4p.server import Server as PVAServer
from p4p.server.asyncio import SharedPV
from zmq.asyncio import Context
import simulacrum
model_service_dir = os.path.dirname(os.path.realpath(__file__))
#set up python logger
L = simulacrum.util.SimulacrumLog(os.path.splitext(os.path.basename(__file__))[0], level='INFO')
class ModelService:
def __init__(self, init_file, name, enable_jitter=False, plot=False):
|
def start(self):
L.info("Starting %s Model Service.", self.name)
pva_server = PVAServer(providers=[{f"SIMULACRUM:SYS0:1:{self.name}:LIVE:TWISS": self.live_twiss_pv,
f"SIMULACRUM:SYS0:1:{self.name}:DESIGN:TWISS": self.design_twiss_pv,
f"SIMULACRUM:SYS0:1:{self.name}:LIVE:RMAT": self.live_rmat_pv,
f"SIMULACRUM:SYS0:1:{self.name}:DESIGN:RMAT": self.design_rmat_pv,}])
try:
zmq_task = self.loop.create_task(self.recv())
pva_refresh_task = self.loop.create_task(self.refresh_pva_table())
broadcast_task = self.loop.create_task(self.broadcast_model_changes())
jitter_task = self.loop.create_task(self.add_jitter())
self.loop.run_forever()
except KeyboardInterrupt:
L.info("Shutting down Model Service.")
            zmq_task.cancel()
            pva_refresh_task.cancel()
            broadcast_task.cancel()
            jitter_task.cancel()
            pva_server.stop()
finally:
self.loop.close()
L.info("Model Service shutdown complete.")
def get_twiss_table(self):
"""
Queries Tao for model and RMAT info.
Returns: A (twiss_table, rmat_table) tuple.
"""
start_time = time.time()
#First we get a list of all the elements.
#NOTE: the "-no_slaves" option for python lat_list only works in Tao 2019_1112 or above.
element_name_list = self.tao.cmd("python lat_list -track_only 1@0>>*|model ele.name")
L.debug(element_name_list)
        for row in element_name_list:
            assert "ERROR" not in row, "Fetching element names failed. This is probably because a version of Tao older than 2019_1112 is being used."
last_element_index = 0
for i, row in enumerate(reversed(element_name_list)):
if row == "END":
last_element_index = len(element_name_list)-1-i
break
element_data = {}
attrs = ("ele.s", "ele.l", "orbit.energy", "ele.a.alpha", "ele.a.beta", "ele.x.eta", "ele.x.etap", "ele.a.phi", "ele.b.alpha", "ele.b.beta", "ele.y.eta", "ele.y.etap", "ele.b.phi", "ele.mat6")
for attr in attrs:
element_data[attr] = self.tao.cmd_real("python lat_list -track_only 1@0>>*|model real:{}".format(attr))
if attr == 'ele.mat6':
element_data[attr] = element_data[attr].reshape((-1, 6, 6))
assert len(element_data[attr]) == len(element_name_list), "Number of elements in model data for {} doesn't match number of element names.".format(attr)
combined_rmat = np.identity(6)
twiss_table_rows = []
rmat_table_rows = []
for i in range(0,last_element_index+1):
element_name = element_name_list[i]
try:
device_name = simulacrum.util.convert_element_to_device(element_name.split("#")[0])
except KeyError:
device_name = ""
element_rmat = element_data['ele.mat6'][i]
rmat = np.matmul(element_rmat, combined_rmat)
combined_rmat = rmat
twiss_table_rows.append({"element": element_name, "device_name": device_name, "s": element_data['ele.s'][i], "length": element_data['ele.l'][i], "p0c": element_data['orbit.energy'][i],
"alpha_x": element_data['ele.a.alpha'][i], "beta_x": element_data['ele.a.beta'][i], "eta_x": element_data['ele.x.eta'][i], "etap_x": element_data['ele.x.etap'][i], "psi_x": element_data['ele.a.phi'][i],
"alpha_y": element_data['ele.b.alpha'][i], "beta_y": element_data['ele.b.beta'][i], "eta_y": element_data['ele.y.eta'][i], "etap_y": element_data['ele.y.etap'][i], "psi_y": element_data['ele.b.phi'][i]})
rmat_table_rows.append({
"element": element_name, "device_name": device_name, "s": element_data['ele.s'][i], "length": element_data['ele.l'][i],
"r11": rmat[0,0], "r12": rmat[0,1], "r13": rmat[0,2], "r14": rmat[0,3], "r15": rmat[0,4], "r16": rmat[0,5],
"r21": rmat[1,0], "r22": rmat[1,1], "r23": rmat[1,2], "r24": rmat[1,3], "r25": rmat[1,4], "r26": rmat[1,5],
"r31": rmat[2,0], "r32": rmat[2,1], "r33": rmat[2,2], "r34": rmat[2,3], "r35": rmat[2,4], "r36": rmat[2,5],
"r41": rmat[3,0], "r42": rmat[3,1], "r43": rmat[3,2], "r44": rmat[3,3], "r45": rmat[3,4], "r46": rmat[3,5],
"r51": rmat[4,0], "r52": rmat[4,1], "r53": rmat[4,2], "r54": rmat[4,3], "r55": rmat[4,4], "r56": rmat[4,5],
"r61": rmat[5,0], "r62": rmat[5,1], "r63": rmat[5,2], "r64": rmat[5,3], "r65": rmat[5,4], "r66": rmat[5,5]})
end_time = time.time()
L.debug("get_twiss_table took %f seconds", end_time - start_time)
return twiss_table_rows, rmat_table_rows
async def refresh_pva_table(self):
"""
This loop continuously checks if the PVAccess table needs to be refreshed,
and publishes a new table if it does. The pva_needs_refresh flag is
usually set when a tao command beginning with 'set' occurs.
"""
while True:
if self.pva_needs_refresh:
                sec, frac = divmod(float(time.time()), 1.0)
                nanosec = int(frac * 1e9)
new_twiss_table, new_rmat_table = self.get_twiss_table()
new_twiss_table = self.twiss_table.wrap(new_twiss_table)
new_twiss_table['timeStamp']['secondsPastEpoch'] = sec
new_twiss_table['timeStamp']['nanoseconds'] = nanosec
new_rmat_table = self.rmat_table.wrap(new_rmat_table)
new_rmat_table['timeStamp']['secondsPastEpoch'] = sec
new_rmat_table['timeStamp']['nanoseconds'] = nanosec
self.live_twiss_pv.post(new_twiss_table)
self.live_rmat_pv.post(new_rmat_table)
self.pva_needs_refresh = False
await asyncio.sleep(1.0)
async def add_jitter(self):
while True:
if self.jitter_enabled:
x0 = np.random.normal(0.0, 0.12*0.001)
y0 = np.random.normal(0.0, 0.12*0.001)
self.tao.cmd(f"set particle_start x = {x0}")
self.tao.cmd(f"set particle_start y = {y0}")
self.recalc_needed = True
self.need_zmq_broadcast = True
await asyncio.sleep(1.0)
async def broadcast_model_changes(self):
"""
This loop broadcasts new orbits, twiss parameters, etc. over ZMQ.
"""
while True:
if self.recalc_needed:
self.tao.cmd("set global lattice_calc_on = T")
self.tao.cmd("set global lattice_calc_on = F")
self.recalc_needed = False
if self.need_zmq_broadcast:
try:
self.send_orbit()
except Exception as e:
L.warning("SEND ORBIT FAILED: %s", e)
try:
self.send_profiles_data()
except Exception as e:
L.warning("SEND PROF DATA FAILED: %s", e)
try:
self.send_und_twiss()
except Exception as e:
L.warning("SEND UND TWISS FAILED: %s", e)
self.need_zmq_broadcast = False
await asyncio.sleep(0.1)
def model_changed(self):
self.recalc_needed = True
self.pva_needs_refresh = True
self.need_zmq_broadcast = True
def get_orbit(self):
start_time = time.time()
#Get X Orbit
x_orb_text = self.tao_cmd("show data orbit.x")[3:-2]
x_orb = _orbit_array_from_text(x_orb_text)
#Get Y Orbit
y_orb_text = self.tao_cmd("show data orbit.y")[3:-2]
y_orb = _orbit_array_from_text(y_orb_text)
#Get e_tot, which we use to see if the single particle beam is dead
e_text = self.tao_cmd("show data orbit.e")[3:-2]
e = _orbit_array_from_text(e_text)
end_time = time.time()
L.debug("get_orbit took %f seconds", end_time-start_time)
return np.stack((x_orb, y_orb, e))
def get_prof_orbit(self):
#Get X Orbit
x_orb_text = self.tao_cmd("show data orbit.profx")[3:-2]
x_orb = _orbit_array_from_text(x_orb_text)
#Get Y Orbit
y_orb_text = self.tao_cmd("show data orbit.profy")[3:-2]
y_orb = _orbit_array_from_text(y_orb_text)
return np.stack((x_orb, y_orb))
def get_twiss(self):
twiss_text = self.tao_cmd("show lat -no_label_lines -at alpha_a -at beta_a -at alpha_b -at beta_b UNDSTART")
if "ERROR" in twiss_text[0]:
twiss_text = self.tao_cmd("show lat -no_label_lines -at alpha_a -at beta_a -at alpha_b -at beta_b BEGUNDH")
if "ERROR" in twiss_text[0]:
twiss_text = self.tao_cmd("show lat -no_label_lines -at alpha_a -at beta_a -at alpha_b -at beta_b BEGUNDS")
#format to list of comma separated values
#msg='twiss from get_twiss: {}'.format(twiss_text)
#L.info(msg)
twiss = twiss_text[0].split()
return twiss
def old_get_orbit(self):
#Get X Orbit
x_orb_text = self.tao_cmd("python lat_list 1@0>>BPM*|model orbit.vec.1")
x_orb = _orbit_array_from_text(x_orb_text)
#Get Y Orbit
y_orb_text = self.tao_cmd("python lat_list 1@0>>BPM*|model orbit.vec.3")
y_orb = _orbit_array_from_text(y_orb_text)
return np.stack((x_orb, y_orb))
    # Information broadcast by the model is sent as two separate ZMQ messages:
    #   1) a metadata message, sent first, with a "tag" that services filter on,
    #      plus optional dtype and shape fields,
    #   2) a data message, sent either as a pickled python object or as raw bytes.
    # (See the receiver sketch after send_orbit below.)
def send_orbit(self):
orb = self.get_orbit()
metadata = {"tag" : "orbit", "dtype": str(orb.dtype), "shape": orb.shape}
self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)
self.model_broadcast_socket.send(orb)
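    # A minimal receiver sketch for the two-part broadcast above (hypothetical
    # subscriber-side code; assumes a zmq SUB socket connected to this publisher):
    #
    #     md = sub.recv_pyobj()                 # metadata: tag/dtype/shape
    #     buf = sub.recv()                      # raw bytes
    #     if md["tag"] == "orbit":
    #         orb = np.frombuffer(buf, dtype=md["dtype"]).reshape(md["shape"])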
def send_profiles_data(self):
twiss_text = self.tao_cmd("show lat -no_label_lines -at beta_a -at beta_b -at e_tot Monitor::OTR*,Monitor::YAG*")
prof_beta_x = [float(l.split()[5]) for l in twiss_text]
prof_beta_y = [float(l.split()[6]) for l in twiss_text]
prof_e = [float(l.split()[7]) for l in twiss_text]
prof_names = [l.split()[1] for l in twiss_text]
prof_orbit = self.get_prof_orbit()
prof_data = np.concatenate((prof_orbit, np.array([prof_beta_x, prof_beta_y, prof_e, prof_names])))
metadata = {"tag" : "prof_data", "dtype": str(prof_data.dtype), "shape": prof_data.shape}
self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)
        self.model_broadcast_socket.send(prof_data)
def send_particle_positions(self):
twiss_text = self.tao_cmd("show lat -no_label_lines -at beta_a -at beta_b -at e_tot Monitor::OTR*,Monitor::YAG*")
prof_names = [l.split()[1] for l in twiss_text]
positions_all = {}
for screen in prof_names:
            positions = self.get_particle_positions(screen)
if not positions:
continue
positions_all[screen] = [[float(position.split()[1]), float(position.split()[3])] for position in positions]
metadata = {"tag": "part_positions"}
self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)
self.model_broadcast_socket.send_pyobj(positions_all)
def get_particle_positions(self, screen):
L.debug("Getting particle positions")
cmd = "show particle -all -ele {screen}".format(screen=screen)
        results = self.tao_cmd(cmd)
        if len(results) < 3:
return False
return results[2:]
def send_und_twiss(self):
twiss = self.get_twiss()
metadata = {"tag": "und_twiss"}
self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)
self.model_broadcast_socket.send_pyobj(twiss)
def tao_cmd(self, cmd):
if cmd.startswith("exit"):
return "Please stop trying to exit the model service's Tao, you jerk!"
result = self.tao.cmd(cmd)
if cmd.startswith("set"):
self.model_changed()
return result
def tao_batch(self, cmds):
L.info("Starting command batch.")
results = [self.tao_cmd(cmd) for cmd in cmds]
L.info("Batch complete.")
return results
async def recv(self):
s = self.ctx.socket(zmq.REP)
s.bind("tcp://*:{}".format(os.environ.get('MODEL_PORT', "12312")))
while True:
p = await s.recv_pyobj()
msg = "Got a message: {}".format(p)
L.debug(msg)
if p['cmd'] == 'tao':
try:
retval = self.tao_cmd(p['val'])
await s.send_pyobj({'status': 'ok', 'result': retval})
except Exception as e:
await s.send_pyobj({'status': 'fail', 'err': e})
elif p['cmd'] == 'send_orbit':
self.model_changed() #Sets the flag that will cause an orbit broadcast
await s.send_pyobj({'status': 'ok'})
elif p['cmd'] == 'echo':
await s.send_pyobj({'status': 'ok', 'result': p['val']})
elif p['cmd'] == 'send_profiles_twiss':
self.model_changed() #Sets the flag that will cause a prof broadcast
#self.send_profiles_twiss()
#self.send_prof_orbit()
await s.send_pyobj({'status': 'ok'})
elif p['cmd'] == 'send_und_twiss':
self.model_changed() #Sets the flag that will cause an und twiss broadcast
#self.send_und_twiss()
await s.send_pyobj({'status': 'ok'})
elif p['cmd'] == 'tao_batch':
try:
results = self.tao_batch(p['val'])
await s.send_pyobj({'status': 'ok', 'result': results})
except Exception as e:
await s.send_pyobj({'status': 'fail', 'err': e})
def _orbit_array_from_text(text):
return np.array([float(l.split()[5]) for l in text])*1000.0
def find_model(model_name):
"""
    Helper routine to find models using standard environment variables:
$LCLS_CLASSIC_LATTICE should point to a checkout of https://github.com/slaclab/lcls-classic-lattice
$LCLS_LATTICE should point to a checkout of https://github.com/slaclab/lcls-lattice
    Available models:
lcls_classic
cu_hxr
cu_spec
cu_sxr
sc_hxr
sc_sxr
"""
if model_name == 'lcls_classic':
tao_initfile = os.path.join(os.environ['LCLS_CLASSIC_LATTICE'], 'bmad/model/tao.init')
elif model_name in ['cu_hxr', 'cu_sxr', 'cu_spec', 'sc_sxr', 'sc_hxr']:
root = os.environ['LCLS_LATTICE']
tao_initfile = os.path.join(root, 'bmad/models/', model_name, 'tao.init')
else:
raise ValueError('Not a valid model: {}'.format(model_name))
assert os.path.exists(tao_initfile), 'Error: file does not exist: ' + tao_initfile
return tao_initfile
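# For example (assuming $LCLS_LATTICE points at a lattice checkout):
#   init_file = find_model('cu_hxr')  # -> $LCLS_LATTICE/bmad/models/cu_hxr/tao.init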
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Simulacrum Model Service")
parser.add_argument(
'model_name',
help='Name of a Tao model from either lcls-lattice or lcls-classic-lattice. Must be one of: ' +
'lcls_classic, cu_hxr, cu_spec, cu_sxr, sc_sxr, or sc_hxr'
)
parser.add_argument(
'--enable-jitter',
action='store_true',
help='Apply jitter on every model update tick (10 Hz). This will significantly increase CPU usage.'
)
parser.add_argument(
'--plot',
action='store_true',
help='Show tao plot'
)
model_service_args = parser.parse_args()
tao_init_file = find_model(model_service_args.model_name)
serv = ModelService(init_file=tao_init_file, name=model_service_args.model_name.upper(), enable_jitter=model_service_args.enable_jitter,
plot=model_service_args.plot)
serv.start()
| self.name = name
tao_lib = os.environ.get('TAO_LIB', '')
self.tao = pytao.Tao(so_lib=tao_lib)
L.debug("Initializing Tao...")
if plot:
self.tao.init("-init {init_file}".format(init_file=init_file))
else:
self.tao.init("-noplot -init {init_file}".format(init_file=init_file))
L.debug("Tao initialization complete!")
self.tao.cmd("set global lattice_calc_on = F")
self.tao.cmd('set global var_out_file = " "')
self.ctx = Context.instance()
self.model_broadcast_socket = zmq.Context().socket(zmq.PUB)
self.model_broadcast_socket.bind("tcp://*:{}".format(os.environ.get('MODEL_BROADCAST_PORT', 66666)))
self.loop = asyncio.get_event_loop()
self.jitter_enabled = enable_jitter
self.twiss_table = NTTable([("element", "s"), ("device_name", "s"),
("s", "d"), ("length", "d"), ("p0c", "d"),
("alpha_x", "d"), ("beta_x", "d"), ("eta_x", "d"), ("etap_x", "d"), ("psi_x", "d"),
("alpha_y", "d"), ("beta_y", "d"), ("eta_y", "d"), ("etap_y", "d"), ("psi_y", "d")])
self.rmat_table = NTTable([("element", "s"), ("device_name", "s"), ("s", "d"), ("length", "d"),
("r11", "d"), ("r12", "d"), ("r13", "d"), ("r14", "d"), ("r15", "d"), ("r16", "d"),
("r21", "d"), ("r22", "d"), ("r23", "d"), ("r24", "d"), ("r25", "d"), ("r26", "d"),
("r31", "d"), ("r32", "d"), ("r33", "d"), ("r34", "d"), ("r35", "d"), ("r36", "d"),
("r41", "d"), ("r42", "d"), ("r43", "d"), ("r44", "d"), ("r45", "d"), ("r46", "d"),
("r51", "d"), ("r52", "d"), ("r53", "d"), ("r54", "d"), ("r55", "d"), ("r56", "d"),
("r61", "d"), ("r62", "d"), ("r63", "d"), ("r64", "d"), ("r65", "d"), ("r66", "d")])
initial_twiss_table, initial_rmat_table = self.get_twiss_table()
        sec, frac = divmod(float(time.time()), 1.0)
        nanosec = int(frac * 1.0e9)
initial_twiss_table = self.twiss_table.wrap(initial_twiss_table)
initial_twiss_table['timeStamp']['secondsPastEpoch'] = sec
initial_twiss_table['timeStamp']['nanoseconds'] = nanosec
initial_rmat_table = self.rmat_table.wrap(initial_rmat_table)
initial_rmat_table['timeStamp']['secondsPastEpoch'] = sec
initial_rmat_table['timeStamp']['nanoseconds'] = nanosec
self.live_twiss_pv = SharedPV(nt=self.twiss_table,
initial=initial_twiss_table,
loop=self.loop)
self.design_twiss_pv = SharedPV(nt=self.twiss_table,
initial=initial_twiss_table,
loop=self.loop)
self.live_rmat_pv = SharedPV(nt=self.rmat_table,
initial=initial_rmat_table,
loop=self.loop)
self.design_rmat_pv = SharedPV(nt=self.rmat_table,
initial=initial_rmat_table,
loop=self.loop)
self.recalc_needed = False
self.pva_needs_refresh = False
self.need_zmq_broadcast = False |
receive.go | package receiver
import (
"log"
"github.com/streadway/amqp" | )
// Log the error message and exit if an amqp call fails
func failOnError(err error, msg string) {
if err != nil {
log.Fatalf("%s: %s", msg, err)
}
}
// Connect to the RabbitMQ server and hand the open connection back to the
// caller (deferring conn.Close() here would close it before it could be used)
func connectToRabbitMQ() *amqp.Connection {
	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
	failOnError(err, "Failed to connect to RabbitMQ")
	return conn
}
// Open a channel on an established connection
func createChannel(conn *amqp.Connection) *amqp.Channel {
	ch, err := conn.Channel()
	failOnError(err, "Failed to open a channel")
	return ch
}
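// A minimal usage sketch building on the helpers above; the queue name
// "hello" and the consumer wiring are illustrative, not part of the original file.
func consumeExample() {
	conn := connectToRabbitMQ()
	defer conn.Close()
	ch := createChannel(conn)
	defer ch.Close()
	q, err := ch.QueueDeclare("hello", false, false, false, false, nil)
	failOnError(err, "Failed to declare a queue")
	msgs, err := ch.Consume(q.Name, "", true, false, false, false, nil)
	failOnError(err, "Failed to register a consumer")
	for d := range msgs {
		log.Printf("Received a message: %s", d.Body)
	}
}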
map.rs | // Copyright 2020 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
// https://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
// https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
// modified, or distributed except according to those terms. Please review the Licences for the
// specific language governing permissions and limitations relating to use of the SAFE Network
// Software.
use super::{AuthorisationKind, CmdError, DataAuthKind, QueryResponse};
use crate::{
Error, Map, MapAddress as Address, MapEntryActions as Changes,
MapPermissionSet as PermissionSet, PublicKey, XorName,
};
use serde::{Deserialize, Serialize};
use std::fmt;
/// Read-only requests that can be made against a Map.
#[derive(Hash, Eq, PartialEq, PartialOrd, Clone, Serialize, Deserialize)]
pub enum MapRead {
/// Get Map.
Get(Address),
/// Get Map value.
GetValue {
/// Map address.
address: Address,
/// Key to get.
key: Vec<u8>,
},
/// Get Map shell.
GetShell(Address),
/// Get Map version.
GetVersion(Address),
/// List Map entries.
ListEntries(Address),
/// List Map keys.
ListKeys(Address),
/// List Map values.
ListValues(Address),
/// List Map permissions.
ListPermissions(Address),
/// Get Map permissions for a user.
ListUserPermissions {
/// Map address.
address: Address,
/// User to get permissions for.
user: PublicKey,
},
}
/// Mutating requests that can be made against a Map.
#[allow(clippy::large_enum_variant)]
#[derive(Hash, Eq, PartialEq, PartialOrd, Clone, Serialize, Deserialize)]
pub enum MapWrite {
/// Create new Map.
New(Map),
/// Delete instance.
Delete(Address),
/// Edit entries.
Edit {
/// Map address.
address: Address,
/// Changes to apply.
changes: Changes,
},
/// Delete user permissions.
DelUserPermissions {
/// Map address.
address: Address,
/// User to delete permissions for.
user: PublicKey,
/// Version to delete.
version: u64,
},
/// Set user permissions.
SetUserPermissions {
/// Map address.
address: Address,
/// User to set permissions for.
user: PublicKey,
/// New permissions.
permissions: PermissionSet,
/// Version to set.
version: u64,
},
}
impl MapRead {
/// Creates a Response containing an error, with the Response variant corresponding to the
/// Request variant.
pub fn error(&self, error: Error) -> QueryResponse {
use MapRead::*;
match *self {
Get(_) => QueryResponse::GetMap(Err(error)),
GetValue { .. } => QueryResponse::GetMapValue(Err(error)),
GetShell(_) => QueryResponse::GetMapShell(Err(error)),
GetVersion(_) => QueryResponse::GetMapVersion(Err(error)),
ListEntries(_) => QueryResponse::ListMapEntries(Err(error)),
ListKeys(_) => QueryResponse::ListMapKeys(Err(error)),
ListValues(_) => QueryResponse::ListMapValues(Err(error)),
ListPermissions(_) => QueryResponse::ListMapPermissions(Err(error)),
ListUserPermissions { .. } => QueryResponse::ListMapUserPermissions(Err(error)),
}
}
/// Returns the type of authorisation needed for the request.
pub fn authorisation_kind(&self) -> AuthorisationKind {
use MapRead::*;
match *self {
Get(_)
| GetValue { .. }
| GetShell(_)
| GetVersion(_)
| ListEntries(_)
| ListKeys(_)
| ListValues(_) | }
}
    /// Returns the address of the destination for the request.
pub fn dst_address(&self) -> XorName {
use MapRead::*;
match self {
Get(ref address)
| GetValue { ref address, .. }
| GetShell(ref address)
| GetVersion(ref address)
| ListEntries(ref address)
| ListKeys(ref address)
| ListValues(ref address)
| ListPermissions(ref address)
| ListUserPermissions { ref address, .. } => *address.name(),
}
}
}
impl fmt::Debug for MapRead {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
use MapRead::*;
write!(
formatter,
"Request::{}",
match *self {
Get(_) => "GetMap",
GetValue { .. } => "GetMapValue",
GetShell(_) => "GetMapShell",
GetVersion(_) => "GetMapVersion",
ListEntries(_) => "ListMapEntries",
ListKeys(_) => "ListMapKeys",
ListValues(_) => "ListMapValues",
ListPermissions(_) => "ListMapPermissions",
ListUserPermissions { .. } => "ListMapUserPermissions",
}
)
}
}
impl MapWrite {
/// Creates a Response containing an error, with the Response variant corresponding to the
/// Request variant.
pub fn error(&self, error: Error) -> CmdError {
CmdError::Data(error)
}
/// Returns the type of authorisation needed for the request.
pub fn authorisation_kind(&self) -> AuthorisationKind {
AuthorisationKind::Data(DataAuthKind::Write)
}
    /// Returns the address of the destination for the request.
pub fn dst_address(&self) -> XorName {
use MapWrite::*;
match self {
New(ref data) => *data.name(),
Delete(ref address)
| SetUserPermissions { ref address, .. }
| DelUserPermissions { ref address, .. }
| Edit { ref address, .. } => *address.name(),
}
}
/// Returns the owner of the data on a New map write.
pub fn owner(&self) -> Option<PublicKey> {
match self {
Self::New(data) => Some(data.owner()),
_ => None,
}
}
}
impl fmt::Debug for MapWrite {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
use MapWrite::*;
write!(
formatter,
"Request::{}",
match *self {
New(_) => "NewMap",
Delete(_) => "DeleteMap",
SetUserPermissions { .. } => "SetMapUserPermissions",
DelUserPermissions { .. } => "DelMapUserPermissions",
Edit { .. } => "EditMap",
}
)
}
} | | ListPermissions(_)
| ListUserPermissions { .. } => AuthorisationKind::Data(DataAuthKind::PrivateRead), |
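// A minimal sketch of the error-mapping contract implemented above; `addr`
// and `some_error` are hypothetical values, not part of this module:
//
//   let read = MapRead::GetVersion(addr);
//   match read.error(some_error) {
//       QueryResponse::GetMapVersion(Err(_)) => { /* variant mirrors the request */ }
//       _ => unreachable!(),
//   }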
test_timeout.py | import sys
import time
import pytest
from easyprocess import EasyProcess
python = sys.executable
def test_timeout():
|
@pytest.mark.timeout(10)
def test_time_cli1():
p = EasyProcess(
[
python,
"-c",
"import logging;logging.basicConfig(level=logging.DEBUG);from easyprocess import EasyProcess;EasyProcess('sleep 15').start()",
]
)
p.call()
assert p.return_code == 0
@pytest.mark.timeout(10)
def test_time_cli2():
p = EasyProcess(
[
python,
"-c",
"import logging;logging.basicConfig(level=logging.DEBUG);from easyprocess import EasyProcess;EasyProcess('sleep 15').call(timeout=0.5)",
]
)
p.call()
assert p.return_code == 0
@pytest.mark.timeout(10)
def test_time2():
p = EasyProcess("sleep 15").call(timeout=1)
assert p.is_alive() is False
assert p.timeout_happened
assert p.return_code != 0
assert p.stdout == ""
@pytest.mark.timeout(10)
def test_timeout_out():
p = EasyProcess(
[python, "-c", "import time;print( 'start');time.sleep(15);print( 'end')"]
).call(timeout=1)
assert p.is_alive() is False
assert p.timeout_happened
assert p.return_code != 0
assert p.stdout == ""
@pytest.mark.timeout(3)
def test_time3():
EasyProcess("sleep 15").start()
ignore_term = """
import signal;
import time;
signal.signal(signal.SIGTERM, lambda *args: None);
while True:
time.sleep(0.5);
"""
@pytest.mark.timeout(10)
def test_force_timeout():
proc = EasyProcess([python, "-c", ignore_term]).start()
    # Calling stop() right away actually stops python before it
    # has a chance to compile and run the input code, meaning the
    # signal handlers aren't registered yet. Give it a moment to
    # set up.
time.sleep(1)
proc.stop(kill_after=1)
assert proc.is_alive() is False
assert proc.return_code != 0
@pytest.mark.timeout(10)
def test_force_0_timeout():
proc = EasyProcess([python, "-c", ignore_term]).start()
time.sleep(1)
proc.stop(kill_after=0)
assert proc.is_alive() is False
assert proc.return_code != 0
@pytest.mark.timeout(10)
def test_force_timeout2():
proc = EasyProcess([python, "-c", ignore_term]).call(timeout=1, kill_after=1)
assert proc.is_alive() is False
assert proc.return_code != 0
@pytest.mark.timeout(10)
def test_stop_wait():
proc = EasyProcess([python, "-c", ignore_term]).start()
time.sleep(1)
proc.sendstop().wait(timeout=1)
# On windows, Popen.terminate actually behaves like kill,
# so don't check that our hanging process code is actually hanging.
# The end result is still what we want. On other platforms, leave
# this assertion to make sure we are correctly testing the ability
# to stop a hung process
if not sys.platform.startswith("win"):
assert proc.is_alive() is True
proc.stop(kill_after=1)
assert proc.is_alive() is False
assert proc.return_code != 0
| p = EasyProcess("sleep 1").start()
p.wait(0.2)
assert p.is_alive()
p.wait(0.2)
assert p.is_alive()
p.wait(2)
assert not p.is_alive()
assert EasyProcess("sleep 0.3").call().return_code == 0
assert EasyProcess("sleep 0.3").call(timeout=0.1).return_code != 0
assert EasyProcess("sleep 0.3").call(timeout=1).return_code == 0
assert EasyProcess("sleep 0.3").call().timeout_happened is False
assert EasyProcess("sleep 0.3").call(timeout=0.1).timeout_happened
assert EasyProcess("sleep 0.3").call(timeout=1).timeout_happened is False |
rewindable_pmmr.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Rewindable (but still readonly) view of a PMMR.
//! Only supports non-prunable backends (i.e. kernel MMR backend).
use std::marker;
use crate::core::hash::{Hash, ZERO_HASH};
use crate::core::pmmr::{bintree_postorder_height, is_leaf, peaks, Backend};
use crate::ser::{PMMRIndexHashable, PMMRable};
/// Rewindable (but still readonly) view of a PMMR.
pub struct RewindablePMMR<'a, T, B>
where
T: PMMRable,
B: Backend<T>,
{
/// The last position in the PMMR
last_pos: u64,
/// The backend for this readonly PMMR
backend: &'a B,
// only needed to parameterise Backend
_marker: marker::PhantomData<T>,
}
impl<'a, T, B> RewindablePMMR<'a, T, B>
where
T: PMMRable,
B: 'a + Backend<T>,
{
/// Build a new readonly PMMR.
pub fn new(backend: &'a B) -> RewindablePMMR<'_, T, B> {
RewindablePMMR {
backend,
last_pos: 0,
_marker: marker::PhantomData,
}
}
/// Reference to the underlying storage backend.
pub fn backend(&'a self) -> &dyn Backend<T> {
self.backend
}
/// Build a new readonly PMMR pre-initialized to
/// last_pos with the provided backend.
pub fn at(backend: &'a B, last_pos: u64) -> RewindablePMMR<'_, T, B> {
RewindablePMMR {
backend,
last_pos,
_marker: marker::PhantomData,
}
}
/// Note: We only rewind the last_pos, we do not rewind the (readonly) backend.
/// Prunable backends are not supported here.
pub fn rewind(&mut self, position: u64) -> Result<(), String> {
// Identify which actual position we should rewind to as the provided
// position is a leaf. We traverse the MMR to include any parent(s) that
// need to be included for the MMR to be valid.
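        // For example, in a 7-node MMR (leaves at positions 1, 2, 4 and 5),
        // rewinding to leaf 5 walks pos up through parents 6 and 7, so
        // last_pos ends at 7 and the tree stays valid.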
let mut pos = position;
while bintree_postorder_height(pos + 1) > 0 {
pos += 1;
}
self.last_pos = pos;
Ok(())
}
/// Get the data element at provided position in the MMR.
pub fn get_data(&self, pos: u64) -> Option<T::E> {
if pos > self.last_pos {
// If we are beyond the rhs of the MMR return None.
None
} else if is_leaf(pos) {
// If we are a leaf then get data from the backend.
self.backend.get_data(pos)
} else {
// If we are not a leaf then return None as only leaves have data.
None
}
}
/// Is the MMR empty?
pub fn is_empty(&self) -> bool {
self.last_pos == 0
}
/// Computes the root of the MMR. Find all the peaks in the current
/// tree and "bags" them to get a single peak.
pub fn root(&self) -> Result<Hash, String> {
if self.is_empty() {
return Ok(ZERO_HASH);
}
let mut res = None;
for peak in self.peaks().iter().rev() {
res = match res {
None => Some(*peak),
Some(rhash) => Some((*peak, rhash).hash_with_index(self.unpruned_size())),
}
}
res.ok_or_else(|| "no root, invalid tree".to_owned())
}
/// Returns a vec of the peaks of this MMR.
pub fn peaks(&self) -> Vec<Hash> |
/// Total size of the tree, including intermediary nodes and ignoring any
/// pruning.
pub fn unpruned_size(&self) -> u64 {
self.last_pos
}
}
| {
let peaks_pos = peaks(self.last_pos);
peaks_pos
.into_iter()
.filter_map(|pi| {
// here we want to get from underlying hash file
// as the pos *may* have been "removed"
self.backend.get_from_file(pi)
})
.collect()
} |
MicoaelPrimo.py | from manimlib.imports import *
class StartingScene(Scene):
def construct(_):
e = Text("Manim homework by mp",font="Consolas",color=BLUE)
_.play(Write(e),run_time=3)
_.wait()
_.play(Uncreate(e))
A = Dot().move_to(np.array([0-2,0,0]))
B = Dot().move_to(np.array([9/10-2,12/10,0]))
C = Dot().move_to(np.array([5/2-2,0,0]))
D = B.copy().shift(9/10*UP+6/5*LEFT)
E = A.copy().shift(9/10*UP+6/5*LEFT)
F = B.copy().shift(8/5*UP+6/5*RIGHT)
G = C.copy().shift(8/5*UP+6/5*RIGHT)
H = A.copy().shift(5/2*DOWN)
I = C.copy().shift(5/2*DOWN)
lab = VGroup()
labtxt = [TextMobject("A").next_to(A).scale(0.5),
TextMobject("B").next_to(B).scale(0.5),
TextMobject("C").next_to(C).scale(0.5),
TextMobject("D").next_to(D).scale(0.5),
TextMobject("E").next_to(E).scale(0.5),
TextMobject("F").next_to(F).scale(0.5),
TextMobject("G").next_to(G).scale(0.5),
TextMobject("H").next_to(H).scale(0.5),
TextMobject("I").next_to(I).scale(0.5),
]
        lab.add(*labtxt)
original_trangle = Polygon(A.get_center(),B.get_center(),C.get_center(),color=ORANGE,fill_color = ORANGE,fill_opacity=0.5)
rect1 = Polygon(A.get_center(),B.get_center(),D.get_center(),E.get_center(),color=GREEN,fill_color = GREEN,fill_opacity=0.5)
rect2 = Polygon(B.get_center(),F.get_center(),G.get_center(),C.get_center(),color=GREEN,fill_color = GREEN,fill_opacity=0.5)
rect3 = Polygon(A.get_center(),C.get_center(),I.get_center(),H.get_center(),color=GREEN,fill_color = GREEN,fill_opacity=0.5)
tran1 = Polygon(D.get_center(),F.get_center(),B.get_center(),color=YELLOW,fill_color = YELLOW,fill_opacity=0.5)
tran2 = Polygon(E.get_center(),A.get_center(),H.get_center(),color=YELLOW,fill_color = YELLOW,fill_opacity=0.5)
tran3 = Polygon(C.get_center(),G.get_center(),I.get_center(),color=YELLOW,fill_color = YELLOW,fill_opacity=0.5)
def getc1(obj):
obj.move_to(tran1.get_center())
| def getc2(obj):
obj.move_to(tran2.get_center())
def getc3(obj):
obj.move_to(tran3.get_center())
S1 = TexMobject("S1").add_updater(getc1)
S2 = TexMobject("S2").add_updater(getc2)
S3 = TexMobject("S3").add_updater(getc3)
trans = VGroup(tran1,tran2,tran3,S1,S2,S3)
# _.add(A,B,C,D,E,F,G,H,I,lab,original_trangle,rect1,rect2,rect3,tran1,tran2,tran3,S1,S2,S3)
_.play(ShowCreation(original_trangle))
_.wait()
_.play(ShowCreation(rect1),ShowCreation(rect2),ShowCreation(rect3))
_.wait()
_.play(ShowCreation(tran1),ShowCreation(tran2),ShowCreation(tran3)
,Write(S1),Write(S2),Write(S3) ,)
_.wait()
_.play(FadeOut(rect1),FadeOut(rect2),FadeOut(rect3))
_.wait()
_.play(Rotate(tran1,PI/2,about_point = B.get_center()),
Rotate(tran2,PI/2,about_point = A.get_center()),
Rotate(tran3,PI/2,about_point = C.get_center()) )
_.play(Transform(tran1,original_trangle))
_.play(Transform(tran2,original_trangle))
_.play(Transform(tran3,original_trangle))
S1.clear_updaters()
S2.clear_updaters()
S3.clear_updaters()
_.play(S1.shift,2*UP+1.5*LEFT)
_.play(S2.shift,2*UP)
_.play(S3.shift,2*UP+1.5*RIGHT)
eq = TextMobject("=").next_to(S1)
eq2 = TextMobject("=").next_to(S2)
_.play(Write(eq),Write(eq2)) | |
zsys_netbsd.go | // Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_netbsd.go
package ipv6
const (
sizeofSockaddrInet6 = 0x1c
sizeofInet6Pktinfo = 0x14
sizeofIPv6Mtuinfo = 0x20
sizeofIPv6Mreq = 0x14
sizeofICMPv6Filter = 0x20
)
type sockaddrInet6 struct {
Len uint8
Family uint8
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type inet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex uint32 | type ipv6Mtuinfo struct {
Addr sockaddrInet6
Mtu uint32
}
type ipv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type icmpv6Filter struct {
Filt [8]uint32
} | }
|
config.ts | import merge from 'lodash/merge';
import { getTheme } from '@grafana/ui';
import { DataSourceInstanceSettings, GrafanaTheme, GrafanaThemeType, PanelPluginMeta } from '@grafana/data';
/**
 * Describes the build information that will be available via the Grafana configuration.
*
* @public
*/
export interface BuildInfo {
version: string;
commit: string;
/**
* Is set to true when running Grafana Enterprise edition.
*
* @deprecated use `licenseInfo.hasLicense` instead
*/
isEnterprise: boolean;
env: string;
edition: string;
latestVersion: string;
hasUpdate: boolean;
}
/**
* Describes available feature toggles in Grafana. These can be configured via the
* `conf/custom.ini` to enable features under development or not yet available in
* stable version.
*
* @public
*/
export interface FeatureToggles {
transformations: boolean;
expressions: boolean;
newEdit: boolean;
/**
* @remarks
* Available only in Grafana Enterprise
*/
meta: boolean;
newVariables: boolean;
tracingIntegration: boolean;
}
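// For reference, a toggle can be switched on via `conf/custom.ini` (a sketch;
// which toggles exist depends on the Grafana version):
//
//   [feature_toggles]
//   enable = transformations, expressions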
/**
* Describes the license information about the current running instance of Grafana.
*
* @public
*/
export interface LicenseInfo {
hasLicense: boolean;
expiry: number;
licenseUrl: string;
stateInfo: string;
}
/**
* Describes all the different Grafana configuration values available for an instance.
*
* @public
*/
export class GrafanaBootConfig {
datasources: { [str: string]: DataSourceInstanceSettings } = {};
panels: { [key: string]: PanelPluginMeta } = {};
minRefreshInterval = '';
appSubUrl = '';
windowTitlePrefix = '';
buildInfo: BuildInfo = {} as BuildInfo;
newPanelTitle = '';
bootData: any;
externalUserMngLinkUrl = '';
externalUserMngLinkName = '';
externalUserMngInfo = '';
allowOrgCreate = false;
disableLoginForm = false;
defaultDatasource = '';
alertingEnabled = false;
alertingErrorOrTimeout = '';
alertingNoDataOrNullValues = '';
alertingMinInterval = 1;
authProxyEnabled = false;
exploreEnabled = false;
ldapEnabled = false;
samlEnabled = false;
autoAssignOrg = true;
verifyEmailEnabled = false;
oauth: any;
disableUserSignUp = false;
loginHint: any;
passwordHint: any;
loginError: any;
navTree: any;
viewersCanEdit = false;
editorsCanAdmin = false;
disableSanitizeHtml = false;
theme: GrafanaTheme;
pluginsToPreload: string[] = [];
featureToggles: FeatureToggles = {
transformations: false,
expressions: false,
newEdit: false,
meta: false,
newVariables: true,
tracingIntegration: false,
};
licenseInfo: LicenseInfo = {} as LicenseInfo;
rendererAvailable = false;
constructor(options: GrafanaBootConfig) {
this.theme = options.bootData.user.lightTheme ? getTheme(GrafanaThemeType.Light) : getTheme(GrafanaThemeType.Dark);
const defaults = {
datasources: {},
windowTitlePrefix: 'Grafana - ',
panels: {},
newPanelTitle: 'Panel Title',
playlist_timespan: '1m',
unsaved_changes_warning: true,
appSubUrl: '',
buildInfo: {
version: 'v1.0',
commit: '1',
env: 'production',
isEnterprise: false,
},
viewersCanEdit: false,
editorsCanAdmin: false,
disableSanitizeHtml: false,
}; | }
const bootData = (window as any).grafanaBootData || {
settings: {},
user: {},
navTree: [],
};
const options = bootData.settings;
options.bootData = bootData;
/**
* Use this to access the {@link GrafanaBootConfig} for the current running Grafana instance.
*
* @public
*/
export const config = new GrafanaBootConfig(options); |
merge(this, defaults, options);
} |
config.go | // Package ec2config defines EC2 configuration.
package ec2config
import (
"bytes"
"fmt"
"io/ioutil"
"path/filepath"
"sync"
"time"
"github.com/aws/aws-k8s-tester/pkg/timeutil"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"sigs.k8s.io/yaml" // must use "sigs.k8s.io/yaml"
)
// AWS_K8S_TESTER_EC2_PREFIX is the environment variable prefix used for "ec2config".
const AWS_K8S_TESTER_EC2_PREFIX = "AWS_K8S_TESTER_EC2_"
const (
// AMITypeBottleRocketCPU is the AMI type for Bottlerocket OS.
// https://github.com/bottlerocket-os/bottlerocket
AMITypeBottleRocketCPU = "BOTTLEROCKET_x86_64"
// AMITypeAL2X8664 is the AMI type for Amazon Linux 2 AMI.
AMITypeAL2X8664 = "AL2_x86_64"
// AMITypeAL2X8664GPU is the AMI type for Amazon Linux 2 AMI with GPU.
AMITypeAL2X8664GPU = "AL2_x86_64_GPU"
// DefaultNodeInstanceTypeCPU is the default EC2 instance type for CPU worker node.
DefaultNodeInstanceTypeCPU = "c5.xlarge"
// DefaultNodeInstanceTypeGPU is the default EC2 instance type for GPU worker node.
DefaultNodeInstanceTypeGPU = "p3.8xlarge"
// DefaultNodeVolumeSize is the default EC2 instance volume size for a worker node.
DefaultNodeVolumeSize = 40
	// ASGsMaxLimit is the maximum number of "Managed Node Group"s per EKS cluster.
ASGsMaxLimit = 10
	// ASGMaxLimit is the maximum number of nodes per "Managed Node Group".
ASGMaxLimit = 100
)
// Config defines EC2 configuration.
type Config struct {
mu *sync.RWMutex
// Up is true if the cluster is up.
Up bool `json:"up"`
TimeFrameCreate timeutil.TimeFrame `json:"time-frame-create" read-only:"true"`
TimeFrameDelete timeutil.TimeFrame `json:"time-frame-delete" read-only:"true"`
// StatusCurrent represents the current status of the cluster.
StatusCurrent string `json:"status-current"`
// Status represents the status of the cluster.
Status []Status `json:"status"`
// Name is the cluster name.
// If empty, deployer auto-populates it.
Name string `json:"name"`
// Partition is the AWS partition for EC2 deployment region.
// If empty, set default partition "aws".
Partition string `json:"partition"`
// Region is the AWS geographic area for EC2 deployment.
// If empty, set default region.
Region string `json:"region"`
// ConfigPath is the configuration file path.
// Deployer is expected to update this file with latest status.
ConfigPath string `json:"config-path,omitempty"`
// AWSAccountID is the account ID of the eks tester caller session.
AWSAccountID string `json:"aws-account-id" read-only:"true"`
// AWSUserID is the user ID of the eks tester caller session.
AWSUserID string `json:"aws-user-id" read-only:"true"`
// AWSIAMRoleARN is the user IAM Role ARN of the eks tester caller session.
AWSIAMRoleARN string `json:"aws-iam-role-arn" read-only:"true"`
// AWSCredentialPath is automatically set via AWS SDK Go.
// And to be mounted as a volume as 'Secret' object.
AWSCredentialPath string `json:"aws-credential-path" read-only:"true"`
// LogColor is true to output logs in color.
LogColor bool `json:"log-color"`
// LogColorOverride is true to use "LogColor" setting
// even if the current terminal does not support color outputs.
// Useful to output in color in HTML based log outputs (e.g. Prow).
LogColorOverride bool `json:"log-color-override"`
// LogLevel configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
LogLevel string `json:"log-level"`
// LogOutputs is a list of log outputs. Valid values are 'default', 'stderr', 'stdout', or file names.
// Logs are appended to the existing file, if any.
// Multiple values are accepted. If empty, it sets to 'default', which outputs to stderr.
// See https://pkg.go.dev/go.uber.org/zap#Open and https://pkg.go.dev/go.uber.org/zap#Config for more details.
LogOutputs []string `json:"log-outputs,omitempty"`
// OnFailureDelete is true to delete all resources on creation fail.
OnFailureDelete bool `json:"on-failure-delete"`
// OnFailureDeleteWaitSeconds is the seconds to wait before deleting
// all resources on creation fail.
OnFailureDeleteWaitSeconds uint64 `json:"on-failure-delete-wait-seconds"`
// S3BucketCreate is true to auto-create S3 bucket.
S3BucketCreate bool `json:"s3-bucket-create"`
// S3BucketCreateKeep is true to not delete auto-created S3 bucket.
// The created S3 bucket is kept.
S3BucketCreateKeep bool `json:"s3-bucket-create-keep"`
// S3BucketName is the name of cluster S3.
S3BucketName string `json:"s3-bucket-name"`
// S3BucketLifecycleExpirationDays is expiration in days for the lifecycle of the object.
S3BucketLifecycleExpirationDays int64 `json:"s3-bucket-lifecycle-expiration-days"`
// RoleName is the name of cluster role.
RoleName string `json:"role-name"`
// RoleCreate is true to auto-create and delete cluster role.
RoleCreate bool `json:"role-create"`
// RoleARN is the role ARN that EC2 uses to create AWS resources for Kubernetes.
// By default, it's empty which triggers tester to create one.
RoleARN string `json:"role-arn"`
// RoleServicePrincipals is the EC2 Role Service Principals
RoleServicePrincipals []string `json:"role-service-principals"`
// RoleManagedPolicyARNs is EC2 Role managed policy ARNs.
RoleManagedPolicyARNs []string `json:"role-managed-policy-arns"`
RoleCFNStackID string `json:"role-cfn-stack-id" read-only:"true"`
RoleCFNStackYAMLFilePath string `json:"role-cfn-stack-yaml-file-path" read-only:"true"`
// VPCCreate is true to auto-create and delete VPC.
VPCCreate bool `json:"vpc-create"`
// VPCID is the VPC ID for cluster creation.
// If not empty, VPC is reused and not deleted.
// If empty, VPC is created anew and deleted on cluster deletion.
VPCID string `json:"vpc-id"`
VPCCFNStackID string `json:"vpc-cfn-stack-id" read-only:"true"`
VPCCFNStackYAMLFilePath string `json:"vpc-cfn-stack-yaml-file-path" read-only:"true"`
// SSHIngressIPv4Range is the IP range for SSH inbound traffic.
SSHIngressIPv4Range string `json:"ssh-ingress-ipv4-range"`
	// VPCCIDR is the IP range (CIDR notation) for VPC, must be a valid private
// (RFC 1918) CIDR range.
VPCCIDR string `json:"vpc-cidr,omitempty"`
// PublicSubnetCIDR1 is the CIDR Block for subnet 1 within the VPC.
PublicSubnetCIDR1 string `json:"public-subnet-cidr-1,omitempty"`
// PublicSubnetCIDR2 is the CIDR Block for subnet 2 within the VPC.
PublicSubnetCIDR2 string `json:"public-subnet-cidr-2,omitempty"`
// PublicSubnetCIDR3 is the CIDR Block for subnet 3 within the VPC.
PublicSubnetCIDR3 string `json:"public-subnet-cidr-3,omitempty"`
// PrivateSubnetCIDR1 is the CIDR Block for subnet 1 within the VPC.
PrivateSubnetCIDR1 string `json:"private-subnet-cidr-1,omitempty"`
// PrivateSubnetCIDR2 is the CIDR Block for subnet 2 within the VPC.
PrivateSubnetCIDR2 string `json:"private-subnet-cidr-2,omitempty"`
// PublicSubnetIDs is the list of all public subnets in the VPC.
PublicSubnetIDs []string `json:"public-subnet-ids" read-only:"true"`
// PrivateSubnetIDs is the list of all private subnets in the VPC.
PrivateSubnetIDs []string `json:"private-subnet-ids" read-only:"true"`
// DHCPOptionsDomainName is used to complete unqualified DNS hostnames for VPC.
// ref. https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-dhcp-options.html
// ref. https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html
DHCPOptionsDomainName string `json:"dhcp-options-domain-name"`
// DHCPOptionsDomainNameServers is a list of strings.
// The IPv4 addresses of up to four domain name servers, or AmazonProvidedDNS, for VPC.
// ref. https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-dhcp-options.html
// ref. https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html
DHCPOptionsDomainNameServers []string `json:"dhcp-options-domain-name-servers"`
// SecurityGroupID is the security group ID for the VPC.
SecurityGroupID string `json:"security-group-id" read-only:"true"`
// RemoteAccessKeyCreate is true to create the remote SSH access private key.
RemoteAccessKeyCreate bool `json:"remote-access-key-create"`
// RemoteAccessKeyName is the remote SSH access private key name.
RemoteAccessKeyName string `json:"remote-access-key-name"`
// RemoteAccessPrivateKeyPath is the remote SSH access private key path.
RemoteAccessPrivateKeyPath string `json:"remote-access-private-key-path"`
// RemoteAccessCommandsOutputPath is the output path for ssh commands.
RemoteAccessCommandsOutputPath string `json:"remote-access-commands-output-path,omitempty"`
// ASGsFetchLogs is true to fetch logs from remote nodes using SSH.
ASGsFetchLogs bool `json:"asgs-fetch-logs"`
// ASGsLogsDir is set to specify the target directory to store all remote log files.
// If empty, it stores in the same directory as "ConfigPath".
ASGsLogsDir string `json:"asgs-logs-dir,omitempty"`
// ASGs is a map from each ASG name to EC2 ASG.
ASGs map[string]ASG `json:"asgs"`
}
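// A minimal YAML sketch of this struct (values are illustrative only; see the
// field tags above for the full set of keys):
//
//	name: my-ec2-cluster
//	region: us-west-2
//	log-level: info
//	asgs:
//	  my-asg:
//	    name: my-asg
//	    ami-type: AL2_x86_64
//	    asg-min-size: 1
//	    asg-max-size: 1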
// Status is a timestamped cluster status entry.
type Status struct {
Time time.Time `json:"time"`
Status string `json:"status"`
}
// TODO: asg status
const StatusDELETEDORNOTEXIST = "DELETED/NOT-EXIST"
// RecordStatus records cluster status.
func (cfg *Config) RecordStatus(status string) {
cfg.mu.Lock()
defer cfg.mu.Unlock()
cfg.StatusCurrent = status
switch status {
case StatusDELETEDORNOTEXIST:
cfg.Up = false
case "TODO/active":
cfg.Up = true
}
| cfg.Status = []Status{sv}
cfg.unsafeSync()
return
}
copied := make([]Status, n+1)
copy(copied[1:], cfg.Status)
copied[0] = sv
cfg.Status = copied
cfg.unsafeSync()
}
// ASG represents one ASG.
type ASG struct {
// Name is the ASG name.
Name string `json:"name"`
ASGCFNStackID string `json:"asg-cfn-stack-id" read-only:"true"`
ASGCFNStackYAMLFilePath string `json:"asg-cfn-stack-yaml-file-path" read-only:"true"`
TimeFrameCreate timeutil.TimeFrame `json:"time-frame-create" read-only:"true"`
TimeFrameDelete timeutil.TimeFrame `json:"time-frame-delete" read-only:"true"`
// RemoteAccessUserName is the user name used for running init scripts or SSH access.
RemoteAccessUserName string `json:"remote-access-user-name"`
// SSMDocumentCreate is true to auto-create and delete SSM document.
SSMDocumentCreate bool `json:"ssm-document-create"`
// SSMDocumentName is the name of SSM document.
SSMDocumentName string `json:"ssm-document-name"`
// SSMDocumentCFNStackName is the name of SSM document CFN stack.
SSMDocumentCFNStackName string `json:"ssm-document-cfn-stack-name"`
// SSMDocumentCommands is the commands for SSM document.
// Only used if SSM doc is created.
SSMDocumentCommands string `json:"ssm-document-commands"`
// SSMDocumentExecutionTimeoutSeconds is the SSM document execution timeout in seconds.
SSMDocumentExecutionTimeoutSeconds int `json:"ssm-document-execution-timeout-in-seconds"`
SSMDocumentCFNStackID string `json:"ssm-document-cfn-stack-id" read-only:"true"`
SSMDocumentCFNStackYAMLFilePath string `json:"ssm-document-cfn-stack-yaml-file-path" read-only:"true"`
SSMDocumentCommandIDs []string `json:"ssm-document-command-ids" read-only:"true"`
// TODO: support bootstrap arguments
// ref. https://github.com/awslabs/amazon-eks-ami/blob/master/amazon-eks-nodegroup.yaml
// AMIType is the AMI type for the node group.
// Allowed values are BOTTLEROCKET_x86_64, AL2_x86_64 and AL2_x86_64_GPU.
// ref. https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html
// ref. https://github.com/awslabs/amazon-eks-ami/blob/master/amazon-eks-nodegroup.yaml
AMIType string `json:"ami-type,omitempty"`
// ImageID is the Amazon Machine Image (AMI).
// This value overrides any AWS Systems Manager Parameter Store value.
ImageID string `json:"image-id"`
// ImageIDSSMParameter is the AWS Systems Manager Parameter Store
// parameter of the AMI ID.
ImageIDSSMParameter string `json:"image-id-ssm-parameter"`
// InstanceTypes is the list of EC2 instance types.
InstanceTypes []string `json:"instance-types"`
// VolumeSize is the size of the default volume, in GiB.
//
// Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned
// IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for
// Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify
// a snapshot, the volume size must be equal to or larger than the snapshot
// size.
//
// Default: If you're creating the volume from a snapshot and don't specify
// a volume size, the default is the snapshot size.
VolumeSize int64 `json:"volume-size"`
// ASGMinSize is the minimum size of ASG.
ASGMinSize int64 `json:"asg-min-size,omitempty"`
// ASGMaxSize is the maximum size of ASG.
ASGMaxSize int64 `json:"asg-max-size,omitempty"`
// ASGDesiredCapacity is the desired capacity of ASG.
ASGDesiredCapacity int64 `json:"asg-desired-capacity,omitempty"`
// Instances is a map from instance ID to instance.
	Instances map[string]Instance `json:"instances" read-only:"true"`
// Logs maps each instance ID to a list of log file paths fetched via SSH access.
Logs map[string][]string `json:"logs" read-only:"true"`
}
// Instance represents an EC2 instance.
type Instance struct {
Architecture string `json:"architecture"`
ImageID string `json:"image-id"`
IAMInstanceProfile IAMInstanceProfile `json:"iam-instance-profile"`
InstanceID string `json:"instance-id"`
InstanceType string `json:"instance-type"`
KeyName string `json:"key-name"`
Placement Placement `json:"placement"`
PrivateDNSName string `json:"private-dns-name"`
PrivateIP string `json:"private-ip"`
PublicDNSName string `json:"public-dns-name"`
PublicIP string `json:"public-ip"`
State State `json:"state"`
StateReason StateReason `json:"state-reason"`
StateTransitionReason string `json:"state-transition-reason"`
SubnetID string `json:"subnet-id"`
VPCID string `json:"vpc-id"`
CPUOptions CPUOptions `json:"cpu-options"`
BlockDeviceMappings []BlockDeviceMapping `json:"block-device-mappings"`
EBSOptimized bool `json:"ebs-optimized"`
RootDeviceName string `json:"root-device-name"`
RootDeviceType string `json:"root-device-type"`
SecurityGroups []SecurityGroup `json:"security-groups"`
LaunchTime time.Time `json:"launch-time"`
RemoteAccessUserName string `json:"remote-access-user-name"`
Hypervisor string `json:"hypervisor"`
VirtualizationType string `json:"virtualization-type"`
}
// IAMInstanceProfile is the IAM instance profile.
type IAMInstanceProfile struct {
// ARN is the Amazon Resource Name (ARN) of the instance profile.
ARN string `json:"arn"`
// ID is the ID of the instance profile.
ID string `json:"id"`
}
// CPUOptions represents the CPU of an EC2 instance.
type CPUOptions struct {
// CoreCount is the number of CPU cores for the instance.
CoreCount int64 `json:"core-count"`
// ThreadsPerCore is the number of threads per CPU core.
ThreadsPerCore int64 `json:"threads-per-core"`
}
// Placement defines EC2 placement.
type Placement struct {
AvailabilityZone string `json:"availability-zone"`
Tenancy string `json:"tenancy"`
}
// State defines an EC2 state.
type State struct {
Code int64 `json:"code"`
Name string `json:"name"`
}
// StateReason represents the EC2 state reason.
type StateReason struct {
Code string `json:"code"`
Message string `json:"message"`
}
// BlockDeviceMapping defines a block device mapping.
type BlockDeviceMapping struct {
DeviceName string `json:"device-name"`
EBS EBS `json:"ebs"`
}
// EBS defines an EBS volume.
type EBS struct {
DeleteOnTermination bool `json:"delete-on-termination"`
Status string `json:"status"`
VolumeID string `json:"volume-id"`
}
// SecurityGroup defines a security group.
type SecurityGroup struct {
GroupName string `json:"group-name"`
GroupID string `json:"group-id"`
}
// Load loads configuration from YAML.
// Useful when injecting shared configuration via ConfigMap.
//
// Example usage:
//
//  import "github.com/aws/aws-k8s-tester/ec2config"
//  cfg, err := ec2config.Load("test.yaml")
//  err = cfg.ValidateAndSetDefaults()
//
// Do not set default values in this function.
// "ValidateAndSetDefaults" must be called separately,
// to prevent overwriting previous data when loaded from disks.
func Load(p string) (cfg *Config, err error) {
var d []byte
d, err = ioutil.ReadFile(p)
if err != nil {
return nil, err
}
cfg = new(Config)
if err = yaml.Unmarshal(d, cfg); err != nil {
return nil, err
}
cfg.mu = new(sync.RWMutex)
if cfg.ConfigPath != p {
cfg.ConfigPath = p
}
var ap string
ap, err = filepath.Abs(p)
if err != nil {
return nil, err
}
cfg.ConfigPath = ap
cfg.unsafeSync()
return cfg, nil
}
// Sync persists current configuration and states to disk.
func (cfg *Config) Sync() (err error) {
cfg.mu.Lock()
defer cfg.mu.Unlock()
return cfg.unsafeSync()
}
func (cfg *Config) unsafeSync() (err error) {
var p string
if cfg.ConfigPath != "" && !filepath.IsAbs(cfg.ConfigPath) {
p, err = filepath.Abs(cfg.ConfigPath)
if err != nil {
return fmt.Errorf("failed to 'filepath.Abs(%s)' %v", cfg.ConfigPath, err)
}
cfg.ConfigPath = p
}
var d []byte
d, err = yaml.Marshal(cfg)
if err != nil {
return fmt.Errorf("failed to 'yaml.Marshal' %v", err)
}
err = ioutil.WriteFile(cfg.ConfigPath, d, 0600)
if err != nil {
return fmt.Errorf("failed to write file %q (%v)", cfg.ConfigPath, err)
}
err = ioutil.WriteFile(cfg.RemoteAccessCommandsOutputPath, []byte(cmdTop+cfg.unsafeSSHCommands()), 0600)
if err != nil {
return fmt.Errorf("failed to write file %q (%v)", cfg.RemoteAccessCommandsOutputPath, err)
}
return nil
}
const cmdTop = `#!/bin/bash
set -e
set -x
`
// SSHCommands returns the SSH commands.
func (cfg *Config) SSHCommands() string {
cfg.mu.RLock()
defer cfg.mu.RUnlock()
return cfg.unsafeSSHCommands()
}
func (cfg *Config) unsafeSSHCommands() (s string) {
if len(cfg.ASGs) == 0 {
return ""
}
buf := bytes.NewBuffer(nil)
buf.WriteByte('\n')
for name, cur := range cfg.ASGs {
buf.WriteString("ASG name \"" + name + "\":\n")
buf.WriteString(cur.SSHCommands(cfg.Region, cfg.RemoteAccessPrivateKeyPath, cur.RemoteAccessUserName))
buf.WriteString("\n\n")
}
return buf.String()
}
// SSHCommands returns the SSH commands.
func (asg *ASG) SSHCommands(region string, keyPath string, userName string) (s string) {
if len(asg.Instances) == 0 {
return fmt.Sprintf("# empty ASG %q\n", asg.Name)
}
s = fmt.Sprintf(`
# change SSH key permission
chmod 400 %s
`, keyPath)
for _, v := range asg.Instances {
s += fmt.Sprintf(`# SSH into the remote machine (instance ID %q, public IP %q, private IP %q, public DNS %q)
ssh -o "StrictHostKeyChecking no" -i %s %s@%s
# download to local machine
scp -i %s %s@%s:REMOTE_FILE_PATH LOCAL_FILE_PATH
scp -i %s -r %s@%s:REMOTE_DIRECTORY_PATH LOCAL_DIRECTORY_PATH
# upload to remote machine
scp -i %s LOCAL_FILE_PATH %s@%s:REMOTE_FILE_PATH
scp -i %s -r LOCAL_DIRECTORY_PATH %s@%s:REMOTE_DIRECTORY_PATH
# SSM session (requires SSM agent)
aws ssm --region %s start-session --target %s
`,
v.InstanceID, v.PublicIP, v.PrivateIP, v.PublicDNSName,
keyPath, userName, v.PublicDNSName,
keyPath, userName, v.PublicDNSName,
keyPath, userName, v.PublicDNSName,
keyPath, userName, v.PublicDNSName,
keyPath, userName, v.PublicDNSName,
region, v.InstanceID,
)
}
return s
}
// ConvertInstance converts "aws ec2 describe-instances" to "config.Instance".
func ConvertInstance(iv *ec2.Instance) (instance Instance) {
instance = Instance{
Architecture: aws.StringValue(iv.Architecture),
ImageID: aws.StringValue(iv.ImageId),
InstanceID: aws.StringValue(iv.InstanceId),
InstanceType: aws.StringValue(iv.InstanceType),
KeyName: aws.StringValue(iv.KeyName),
PrivateDNSName: aws.StringValue(iv.PrivateDnsName),
PrivateIP: aws.StringValue(iv.PrivateIpAddress),
PublicDNSName: aws.StringValue(iv.PublicDnsName),
PublicIP: aws.StringValue(iv.PublicIpAddress),
StateTransitionReason: aws.StringValue(iv.StateTransitionReason),
SubnetID: aws.StringValue(iv.SubnetId),
VPCID: aws.StringValue(iv.VpcId),
BlockDeviceMappings: make([]BlockDeviceMapping, len(iv.BlockDeviceMappings)),
EBSOptimized: aws.BoolValue(iv.EbsOptimized),
RootDeviceName: aws.StringValue(iv.RootDeviceName),
RootDeviceType: aws.StringValue(iv.RootDeviceType),
SecurityGroups: make([]SecurityGroup, len(iv.SecurityGroups)),
LaunchTime: aws.TimeValue(iv.LaunchTime),
Hypervisor: aws.StringValue(iv.Hypervisor),
VirtualizationType: aws.StringValue(iv.VirtualizationType),
}
for j := range iv.BlockDeviceMappings {
instance.BlockDeviceMappings[j] = BlockDeviceMapping{
DeviceName: aws.StringValue(iv.BlockDeviceMappings[j].DeviceName),
EBS: EBS{
DeleteOnTermination: aws.BoolValue(iv.BlockDeviceMappings[j].Ebs.DeleteOnTermination),
Status: aws.StringValue(iv.BlockDeviceMappings[j].Ebs.Status),
VolumeID: aws.StringValue(iv.BlockDeviceMappings[j].Ebs.VolumeId),
},
}
}
for j := range iv.SecurityGroups {
instance.SecurityGroups[j] = SecurityGroup{
GroupName: aws.StringValue(iv.SecurityGroups[j].GroupName),
GroupID: aws.StringValue(iv.SecurityGroups[j].GroupId),
}
}
if iv.IamInstanceProfile != nil {
instance.IAMInstanceProfile = IAMInstanceProfile{
ARN: aws.StringValue(iv.IamInstanceProfile.Arn),
ID: aws.StringValue(iv.IamInstanceProfile.Id),
}
}
if iv.Placement != nil {
instance.Placement = Placement{
AvailabilityZone: aws.StringValue(iv.Placement.AvailabilityZone),
Tenancy: aws.StringValue(iv.Placement.Tenancy),
}
}
if iv.State != nil {
instance.State = State{
Code: aws.Int64Value(iv.State.Code),
Name: aws.StringValue(iv.State.Name),
}
}
if iv.StateReason != nil {
instance.StateReason = StateReason{
Code: aws.StringValue(iv.StateReason.Code),
Message: aws.StringValue(iv.StateReason.Message),
}
}
if iv.CpuOptions != nil {
instance.CPUOptions = CPUOptions{
CoreCount: aws.Int64Value(iv.CpuOptions.CoreCount),
ThreadsPerCore: aws.Int64Value(iv.CpuOptions.ThreadsPerCore),
}
}
return instance
} | sv := Status{Time: time.Now(), Status: status}
n := len(cfg.Status)
if n == 0 { |
rainbow.py | from PIL import Image
import numpy as np
import colorsys
import os, sys
import argparse
import matplotlib.pyplot as plt
rgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv)
hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)
def crop(image, box=None):
if box:
imageBox = box
else:
imageBox = image.getbbox()
return image.crop(imageBox)
def | (image, value):
im = image.convert('RGBA')
arr = np.array(np.asarray(im).astype(float))
r,g,b,a = np.rollaxis(arr, axis=-1)
# print(np.max(r))
h,s,v = rgb_to_hsv(r, g, b)
r, g, b = hsv_to_rgb((h + value/360.0) % 1.0, s, v)
arr = np.dstack((r, g, b, a))
# print(np.max(r))
# plt.imshow(arr.astype(int), aspect='auto')
# plt.show()
return Image.fromarray(arr.astype('uint8'), 'RGBA')
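# For example, hue_shift(im, 120) rotates every pixel's hue a third of the way
# around the color wheel while preserving saturation, value and alpha.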
parser = argparse.ArgumentParser(description='Rainbow an image batch')
parser.add_argument('--filename', dest='filename', type=str)
parser.add_argument('--step', dest='step', type=float, default=5.0)
parser.add_argument('--max_step', dest='max_step', type=float, default=360.0)
args = parser.parse_args()
color_image = Image.open(args.filename)
basename = os.path.basename(args.filename)
base, ext = os.path.splitext(basename)
if not os.path.exists('anim'):
os.mkdir('anim')
for n in range(0, int(args.max_step/args.step)):
dtheta = n*args.step
print('Writing out', dtheta)
cropped = crop(color_image, (1620, 780, 2220, 1380))
new_im = hue_shift(cropped, dtheta)
new_fn = os.path.join('anim','{0}_{1}{2}'.format(base, n, ext))
new_im.save(new_fn) | hue_shift |
training.py | import random
import json
import pickle
import numpy as np
import nltk
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.optimizers import SGD
lemmatizer = WordNetLemmatizer()
intents = json.loads(open('./intents.json').read())
words = []
classes = []
documents = []
ignore_letters = ['?', '!', '@', ',', ';', '.']
for intent in intents['intents']:
for pattern in intent['patterns']:
word_list = nltk.word_tokenize(pattern)
words.extend(word_list)
documents.append((word_list, intent['tag']))
if intent['tag'] not in classes:
classes.append(intent['tag'])
words = [lemmatizer.lemmatize(word) for word in words if word not in ignore_letters]
words = sorted(set(words))
classes = sorted(set(classes))
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))
training = []
output_empty = [0] * len(classes)
for document in documents:
bag = []
word_patterns = document[0]
word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns]
    for word in words:
        bag.append(1 if word in word_patterns else 0)  # one-hot over the full vocabulary, not just this pattern's words
output_row = list(output_empty)
output_row[classes.index(document[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
training = np.array(training, dtype=object)  # rows hold [bag, output_row] lists of unequal length
train_x = list(training[:, 0])
train_y = list(training[:, 1])
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
sgd = SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
| hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.h5')  # save() takes no history argument; the fit history stays in `hist`
print('Done') |
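# A minimal follow-up sketch (not part of the original script) showing how the
# artifacts saved above could be reloaded for inference:
#
#   from tensorflow.keras.models import load_model
#   words = pickle.load(open('words.pkl', 'rb'))
#   classes = pickle.load(open('classes.pkl', 'rb'))
#   model = load_model('chatbot_model.h5')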
|
instastory_x3.py | # @x3raqe
# Sponsor: Mohammed
"""QuotLy: Available commands: .انستا
"""
import datetime
import asyncio
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern="ستوري ?(.*)"))
async def _(event):
if event.fwd_from: |
return
if not event.reply_to_msg_id:
await event.edit("``` ~ @X3RAQE - .```")
return
reply_message = await event.get_reply_message()
if not reply_message.text:
await event.edit("``` ~ @X3RAQE - ```")
return
chat = "@x3storybot"
sender = reply_message.sender
if reply_message.sender.bot:
await event.edit("``` ~ @X3RAQE - ```")
return
await event.edit("`جار ارسال لك التحميل من @x3storybot`")
async with event.client.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1077724863))
await event.client.forward_messages(chat, reply_message)
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock me (@x3storybot) u Nigga```")
return
if response.text.startswith("Hi!"):
await event.edit("```Can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await event.client.send_message(event.chat_id, response.message)
|
|
extend.py | import random
import base64
import io
import nacl.public
import lightnion as lnn
def circuit(state, descriptor):
onion_key = base64.b64decode(descriptor['ntor-onion-key'] + '====')
eidentity = descriptor['identity']['master-key'] # (assuming ed25519 here)
identity = base64.b64decode(descriptor['router']['identity'] + '====')
addr = descriptor['router']['address']
port = descriptor['router']['orport']
eph_key, hdata = lnn.crypto.ntor.hand(identity, onion_key)
payload = lnn.cell.relay.extend2.pack(
hdata, [(addr, port)], [identity, eidentity])
state = lnn.hop.send(state,
lnn.cell.relay.cmd.RELAY_EXTEND2, payload.raw, stream_id=0)
state, cells = lnn.hop.recv(state, once=True)
    if len(cells) != 1:
        raise RuntimeError('Expected exactly one cell, got: {}'.format(cells))
    if cells[0].relay.cmd != lnn.cell.relay.cmd.RELAY_EXTENDED2:
        raise RuntimeError('Expected EXTENDED2, got {} here: {}'.format(
            cells[0].relay.cmd, cells[0].relay.truncated))
payload = lnn.cell.relay.extended2.payload(cells[0].relay.data)
if not payload.valid:
raise RuntimeError('Invalid EXTENDED2 payload: {}'.format(
payload.truncated))
raw_material = lnn.crypto.ntor.shake(eph_key, payload.data, identity,
onion_key, length=92)
material = lnn.crypto.ntor.kdf(raw_material) | extended = lnn.create.circuit(state.circuit.id, material)
state.wrap(lnn.onion.state(state.link, extended))
return state | |
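# A minimal usage sketch; `state` and `descriptor` are assumed to come from
# lightnion's link/consensus machinery, which is out of scope here:
#
#   state = circuit(state, descriptor)  # one RELAY_EXTEND2/RELAY_EXTENDED2 round trip per hop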
__init__.py | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Utilities related to the ORM."""
__all__ = ('load_code', 'load_computer', 'load_group', 'load_node')
def load_entity(
entity_loader=None, identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True
):
# pylint: disable=too-many-arguments
"""
Load an entity instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Code
:param pk: pk of a Code
:param uuid: uuid of a Code, or the beginning of the uuid
:param label: label of a Code
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:returns: the Code instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Code is found
:raise aiida.common.MultipleObjectsError: if more than one Code was found
"""
from aiida.orm.utils.loaders import OrmEntityLoader, IdentifierType
if entity_loader is None or not issubclass(entity_loader, OrmEntityLoader):
raise TypeError(f'entity_loader should be a sub class of {type(OrmEntityLoader)}')
inputs_provided = [value is not None for value in (identifier, pk, uuid, label)].count(True)
if inputs_provided == 0:
raise ValueError("one of the parameters 'identifier', pk', 'uuid' or 'label' has to be specified")
elif inputs_provided > 1:
raise ValueError("only one of parameters 'identifier', pk', 'uuid' or 'label' has to be specified")
if pk is not None:
if not isinstance(pk, int):
raise TypeError('a pk has to be an integer')
identifier = pk
identifier_type = IdentifierType.ID
elif uuid is not None:
if not isinstance(uuid, str):
raise TypeError('uuid has to be a string type')
identifier = uuid
identifier_type = IdentifierType.UUID
elif label is not None:
if not isinstance(label, str):
raise TypeError('label has to be a string type')
identifier = label
identifier_type = IdentifierType.LABEL
else:
identifier = str(identifier)
identifier_type = None
return entity_loader.load_entity(
identifier, identifier_type, sub_classes=sub_classes, query_with_dashes=query_with_dashes
)
def load_code(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a Code instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Code
:param pk: pk of a Code
:param uuid: uuid of a Code, or the beginning of the uuid
:param label: label of a Code
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:return: the Code instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Code is found
:raise aiida.common.MultipleObjectsError: if more than one Code was found
"""
from aiida.orm.utils.loaders import CodeEntityLoader
return load_entity(
CodeEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
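
# A minimal usage sketch (assuming a loaded AiiDA profile; the identifiers
# below are hypothetical and only illustrate the lookup modes documented
# above):
#
#     code = load_code(pk=42)              # explicit pk
#     code = load_code(uuid='8f4c12')      # uuid, or the beginning of one
#     code = load_code(label='my-code')    # explicit label
#     code = load_code('my-code')          # type inferred automatically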
def load_computer(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
|
def load_group(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a Group instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Group
:param pk: pk of a Group
:param uuid: uuid of a Group, or the beginning of the uuid
:param label: label of a Group
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:return: the Group instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Group is found
:raise aiida.common.MultipleObjectsError: if more than one Group was found
"""
from aiida.orm.utils.loaders import GroupEntityLoader
return load_entity(
GroupEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
def load_node(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True):
"""
Load a node by one of its identifiers: pk or uuid. If the type of the identifier is unknown
simply pass it without a keyword and the loader will attempt to infer the type
:param identifier: pk (integer) or uuid (string)
:param pk: pk of a node
:param uuid: uuid of a node, or the beginning of the uuid
:param label: label of a Node
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:returns: the node instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Node is found
:raise aiida.common.MultipleObjectsError: if more than one Node was found
"""
from aiida.orm.utils.loaders import NodeEntityLoader
return load_entity(
NodeEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
)
| """
Load a Computer instance by one of its identifiers: pk, uuid or label
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to
automatically infer the type.
:param identifier: pk (integer), uuid (string) or label (string) of a Computer
:param pk: pk of a Computer
:param uuid: uuid of a Computer, or the beginning of the uuid
:param label: label of a Computer
:param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class
of the ORM class of the given entity loader.
:param bool query_with_dashes: allow to query for a uuid with dashes
:return: the Computer instance
:raise ValueError: if none or more than one of the identifiers are supplied
:raise TypeError: if the provided identifier has the wrong type
:raise aiida.common.NotExistent: if no matching Computer is found
:raise aiida.common.MultipleObjectsError: if more than one Computer was found
"""
from aiida.orm.utils.loaders import ComputerEntityLoader
return load_entity(
ComputerEntityLoader,
identifier=identifier,
pk=pk,
uuid=uuid,
label=label,
sub_classes=sub_classes,
query_with_dashes=query_with_dashes
) |
aggregate_context_test.go | package cqrs
import (
"testing"
)
func TestAggregateContext_AggregateId(t *testing.T) {
id := NewIntAggregateId(12)
ctx := NewAggregateContext(id, 0)
if ctx.AggregateId().String() != "12" {
t.Errorf("expected aggregateId %s but got %s", id.String(), ctx.AggregateId().String())
}
}
func TestAggregateContext_incrementVersion(t *testing.T) {
id := NewIntAggregateId(12)
ctx := NewAggregateContext(id, 123)
if ctx.Version() != 123 {
t.Errorf("expected version %d but got %d", 123, ctx.Version())
}
if ctx.OriginalVersion() != 123 {
t.Errorf("expected original version %d but got %d", 123, ctx.OriginalVersion())
}
ctx.incrementVersion()
	if ctx.Version() != 124 {
		t.Errorf("expected version %d but got %d", 124, ctx.Version())
	}
	if ctx.OriginalVersion() != 124 {
		t.Errorf("expected original version %d but got %d", 124, ctx.OriginalVersion())
	}
}
func TestAggregateContext_OriginalVersionShouldReturnCommittedVersion(t *testing.T) {
id := NewIntAggregateId(12)
ctx := NewAggregateContext(id, 123)
if ctx.Version() != 123 {
t.Errorf("expected version %d but got %d", 123, ctx.Version())
}
if ctx.OriginalVersion() != 123 {
t.Errorf("expected original version %d but got %d", 123, ctx.OriginalVersion())
}
ctx.StoreEvent(eventA{})
if ctx.OriginalVersion() != 123 {
t.Errorf("expected original version %d but got %d", 123, ctx.OriginalVersion())
}
if ctx.Version() != 124 {
t.Errorf("expected version %d but got %d", 124, ctx.Version())
}
}
| func TestAggregateContext_EventsHandling(t *testing.T) {
id := NewIntAggregateId(12)
ctx := NewAggregateContext(id, 0)
ctx.StoreEvent(&eventA{})
ctx.StoreEvent(&eventB{})
events := ctx.getUncommittedEvents()
if len(events) != 2 {
t.Fatalf("expected %d events but got %d", 2, len(events))
}
ctx.clearUncommittedEvents()
events = ctx.getUncommittedEvents()
if len(events) != 0 {
t.Fatalf("expected no events but got %d", len(events))
}
} | |
mira.go | package mira
import "net/http"
// Init is used when we initialize the Reddit instance. It
// automatically starts a goroutine that refreshes the token
// every 45 minutes. auto_refresh should not be accessible to
// the end user as it is an internal method.
func | (c Credentials) (*Reddit, error) {
auth, err := Authenticate(&c)
if err != nil {
return nil, err
}
auth.Client = &http.Client{}
auth.SetDefault()
go auth.auto_refresh()
return auth, nil
}
| Init |
routes.go | package main
import (
"github.com/cybersamx/to-do-go/app/models"
"html/template"
"net/http"
)
type notesTemplateData struct {
Notes []*models.Note
}
type noteTemplateData struct {
Note *models.Note
}
func notesHandler(app *App) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Mux supports both fixed-path and subtree-path patterns, i.e. paths that
		// do not end with `/` and paths that end with `/` respectively. A single `/`
		// therefore matches anything not handled by another handler, so we guard
		// against that case here.
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
// Handle the notes page where user can create a note or view a list of notes.
if r.Method == http.MethodGet {
app.infoLog.Print("GET notes html page")
notes, err := app.noteModel.GetNotes()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data := ¬esTemplateData{
Notes: notes,
}
files := []string{
"../html/notes-page.gohtml",
"../html/notes-component.gohtml",
"../html/base-layout.gohtml",
}
tpl, err := template.ParseFiles(files...)
if err != nil {
app.errLog.Print(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = tpl.Execute(w, data)
if err != nil {
app.errLog.Print(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
})
}
func parseForNote(app *App, r *http.Request) *models.Note {
nilNote := models.Note{
ID: "",
Title: "",
Text: "",
}
queryStr, ok := r.URL.Query()["noteID"]
if ok {
note := app.noteModel.GetNote(queryStr[0])
if note != nil {
return note
}
}
return &nilNote
}
func editNoteHandler(app *App) http.Handler |
func removeNoteHandler(app *App) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
app.infoLog.Print("POST remove note")
noteID := r.FormValue("noteID")
if noteID == "" {
http.Error(w, "No noteID", http.StatusBadRequest)
return
}
err := app.noteModel.RemoveNote(noteID)
if err != nil {
app.errLog.Print(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
app.infoLog.Print("deleted note with ID ", noteID)
http.Redirect(w, r, "/", http.StatusSeeOther)
}
})
}
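
// A minimal wiring sketch (assuming the App type from this package; the
// route paths below are illustrative and not taken from the original main):
//
//	mux := http.NewServeMux()
//	mux.Handle("/", notesHandler(app))
//	mux.Handle("/note", editNoteHandler(app))
//	mux.Handle("/note/remove", removeNoteHandler(app))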
| {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodGet {
app.infoLog.Print("GET create note html page")
// Get param from the URL
note := parseForNote(app, r)
data := noteTemplateData{
Note: note,
}
files := []string{
"../html/edit-note-page.gohtml",
"../html/base-layout.gohtml",
}
tpl, err := template.ParseFiles(files...)
if err != nil {
app.errLog.Print(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = tpl.Execute(w, data)
if err != nil {
app.errLog.Print(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
} else if r.Method == http.MethodPost {
app.infoLog.Print("POST create note html page")
title := r.FormValue("title")
text := r.FormValue("text")
noteID := r.FormValue("noteID")
note, err := app.noteModel.Upsert(noteID, title, text)
if err != nil {
app.errLog.Print(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if noteID == "" {
app.infoLog.Print("created note with ID ", note.ID)
} else {
app.infoLog.Print("updated note with ID ", note.ID)
}
http.Redirect(w, r, "/", http.StatusSeeOther)
}
})
} |
products-collection.js | 'use strict';
const Model = require('../model.js');
const productsSchema = require('./products-schema.js');
class | extends Model {
constructor(schema) {
super(schema);
}
}
module.exports = new Product(productsSchema); | Product |
current_thread.rs | #![deny(warnings, rust_2018_idioms)]
#![feature(async_await)]
use std::any::Any;
use std::cell::{Cell, RefCell};
use std::future::Future;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::thread;
use std::time::Duration;
use tokio_current_thread::{block_on_all, CurrentThread};
use tokio_executor::TypedExecutor;
use tokio_sync::oneshot;
mod from_block_on_all {
use super::*;
fn test<F: Fn(Pin<Box<dyn Future<Output = ()>>>) + 'static>(spawn: F) {
let cnt = Rc::new(Cell::new(0));
let c = cnt.clone();
let msg = tokio_current_thread::block_on_all(async move {
c.set(1 + c.get());
// Spawn!
spawn(Box::pin(async move {
c.set(1 + c.get());
}));
"hello"
});
assert_eq!(2, cnt.get());
assert_eq!(msg, "hello");
}
#[test]
fn spawn() {
test(tokio_current_thread::spawn)
}
#[test]
fn execute() {
test(|f| {
tokio_current_thread::TaskExecutor::current()
.spawn(f)
.unwrap();
});
}
}
#[test]
fn block_waits() {
let (tx, rx) = oneshot::channel();
thread::spawn(|| {
thread::sleep(Duration::from_millis(1000));
tx.send(()).unwrap();
});
let cnt = Rc::new(Cell::new(0));
let cnt2 = cnt.clone();
block_on_all(async move {
rx.await.unwrap();
cnt.set(1 + cnt.get());
});
assert_eq!(1, cnt2.get());
}
#[test]
fn spawn_many() {
const ITER: usize = 200;
let cnt = Rc::new(Cell::new(0));
let mut tokio_current_thread = CurrentThread::new();
for _ in 0..ITER {
let cnt = cnt.clone();
tokio_current_thread.spawn(async move {
cnt.set(1 + cnt.get());
});
}
tokio_current_thread.run().unwrap();
assert_eq!(cnt.get(), ITER);
}
mod does_not_set_global_executor_by_default {
use super::*;
fn test<F: Fn(Pin<Box<dyn Future<Output = ()> + Send>>) -> Result<(), E> + 'static, E>(
spawn: F,
) {
block_on_all(async {
spawn(Box::pin(async {})).unwrap_err();
});
}
#[test]
fn spawn() {
test(|f| tokio_executor::DefaultExecutor::current().spawn(f))
}
}
mod from_block_on_future {
use super::*;
fn test<F: Fn(Pin<Box<dyn Future<Output = ()>>>)>(spawn: F) {
let cnt = Rc::new(Cell::new(0));
let cnt2 = cnt.clone();
let mut tokio_current_thread = CurrentThread::new();
tokio_current_thread.block_on(async move {
let cnt3 = cnt2.clone();
spawn(Box::pin(async move {
cnt3.set(1 + cnt3.get());
}));
});
tokio_current_thread.run().unwrap();
assert_eq!(1, cnt.get());
}
#[test]
fn spawn() {
test(tokio_current_thread::spawn);
}
#[test]
fn execute() {
test(|f| {
tokio_current_thread::TaskExecutor::current()
.spawn(f)
.unwrap();
});
}
}
mod outstanding_tasks_are_dropped_when_executor_is_dropped {
use super::*;
async fn never(_rc: Rc<()>) {
loop {
yield_once().await;
}
}
fn test<F, G>(spawn: F, dotspawn: G)
where
F: Fn(Pin<Box<dyn Future<Output = ()>>>) + 'static,
G: Fn(&mut CurrentThread, Pin<Box<dyn Future<Output = ()>>>),
{
let mut rc = Rc::new(());
let mut tokio_current_thread = CurrentThread::new();
dotspawn(&mut tokio_current_thread, Box::pin(never(rc.clone())));
drop(tokio_current_thread);
// Ensure the daemon is dropped
assert!(Rc::get_mut(&mut rc).is_some());
// Using the global spawn fn
let mut rc = Rc::new(());
let rc2 = rc.clone();
let mut tokio_current_thread = CurrentThread::new();
tokio_current_thread.block_on(async move {
spawn(Box::pin(never(rc2)));
});
drop(tokio_current_thread);
// Ensure the daemon is dropped
assert!(Rc::get_mut(&mut rc).is_some());
}
#[test]
fn spawn() {
test(tokio_current_thread::spawn, |rt, f| {
rt.spawn(f);
})
}
#[test]
fn execute() {
test(
|f| {
tokio_current_thread::TaskExecutor::current()
.spawn(f)
.unwrap();
},
// Note: `CurrentThread` doesn't currently implement
// `futures::Executor`, so we'll call `.spawn(...)` rather than
// `.execute(...)` for now. If `CurrentThread` is changed to
// implement Executor, change this to `.execute(...).unwrap()`.
|rt, f| {
rt.spawn(f);
},
);
}
}
#[test]
#[should_panic]
fn nesting_run() {
block_on_all(async {
block_on_all(async {});
});
}
mod run_in_future {
use super::*;
#[test]
#[should_panic]
fn spawn() {
block_on_all(async {
tokio_current_thread::spawn(async {
block_on_all(async {});
});
});
}
#[test]
#[should_panic]
fn execute() {
block_on_all(async {
tokio_current_thread::TaskExecutor::current()
.spawn(async {
block_on_all(async {});
})
.unwrap();
});
}
}
#[test]
fn tick_on_infini_future() {
let num = Rc::new(Cell::new(0));
async fn infini(num: Rc<Cell<usize>>) {
loop {
num.set(1 + num.get());
yield_once().await
}
}
CurrentThread::new()
.spawn(infini(num.clone()))
.turn(None)
.unwrap();
assert_eq!(1, num.get());
}
mod tasks_are_scheduled_fairly {
use super::*;
async fn spin(state: Rc<RefCell<[i32; 2]>>, idx: usize) {
loop {
// borrow_mut scope
{
let mut state = state.borrow_mut();
if idx == 0 {
let diff = state[0] - state[1];
assert!(diff.abs() <= 1);
if state[0] >= 50 {
return;
}
}
state[idx] += 1;
if state[idx] >= 100 {
return;
}
}
yield_once().await;
}
}
fn test<F: Fn(Pin<Box<dyn Future<Output = ()>>>)>(spawn: F) {
let state = Rc::new(RefCell::new([0, 0]));
block_on_all(async move {
spawn(Box::pin(spin(state.clone(), 0)));
spawn(Box::pin(spin(state, 1)));
});
}
#[test]
fn spawn() {
test(tokio_current_thread::spawn)
}
#[test]
fn execute() {
test(|f| {
tokio_current_thread::TaskExecutor::current()
.spawn(f)
.unwrap();
})
}
}
mod and_turn {
use super::*;
fn test<F, G>(spawn: F, dotspawn: G)
where
F: Fn(Pin<Box<dyn Future<Output = ()>>>) + 'static,
G: Fn(&mut CurrentThread, Pin<Box<dyn Future<Output = ()>>>),
{
let cnt = Rc::new(Cell::new(0));
let c = cnt.clone();
let mut tokio_current_thread = CurrentThread::new();
// Spawn a basic task to get the executor to turn
dotspawn(&mut tokio_current_thread, Box::pin(async {}));
// Turn once...
tokio_current_thread.turn(None).unwrap();
dotspawn(
&mut tokio_current_thread,
Box::pin(async move {
c.set(1 + c.get());
// Spawn!
spawn(Box::pin(async move {
c.set(1 + c.get());
}));
}),
);
// This does not run the newly spawned thread
tokio_current_thread.turn(None).unwrap();
assert_eq!(1, cnt.get());
// This runs the newly spawned thread
tokio_current_thread.turn(None).unwrap();
assert_eq!(2, cnt.get());
}
#[test]
fn spawn() {
test(tokio_current_thread::spawn, |rt, f| {
rt.spawn(f);
})
}
#[test]
fn execute() {
test(
|f| {
tokio_current_thread::TaskExecutor::current()
.spawn(f)
.unwrap();
},
// Note: `CurrentThread` doesn't currently implement
// `futures::Executor`, so we'll call `.spawn(...)` rather than
// `.execute(...)` for now. If `CurrentThread` is changed to
// implement Executor, change this to `.execute(...).unwrap()`.
|rt, f| {
rt.spawn(f);
},
);
}
}
mod in_drop {
use super::*;
struct OnDrop<F: FnOnce()>(Option<F>);
impl<F: FnOnce()> Drop for OnDrop<F> {
fn drop(&mut self) {
(self.0.take().unwrap())();
}
}
async fn noop(_data: Box<dyn Any>) {}
fn test<F, G>(spawn: F, dotspawn: G)
where
F: Fn(Pin<Box<dyn Future<Output = ()>>>) + 'static,
G: Fn(&mut CurrentThread, Pin<Box<dyn Future<Output = ()>>>),
{
let mut tokio_current_thread = CurrentThread::new();
let (tx, rx) = oneshot::channel();
dotspawn(
&mut tokio_current_thread,
Box::pin(noop(Box::new(OnDrop(Some(move || {
spawn(Box::pin(async move {
tx.send(()).unwrap();
}));
}))))),
);
tokio_current_thread.block_on(rx).unwrap();
tokio_current_thread.run().unwrap();
}
#[test]
fn spawn() {
test(tokio_current_thread::spawn, |rt, f| {
rt.spawn(f);
})
}
#[test]
fn execute() {
test(
|f| {
tokio_current_thread::TaskExecutor::current()
.spawn(f)
.unwrap();
},
// Note: `CurrentThread` doesn't currently implement
// `futures::Executor`, so we'll call `.spawn(...)` rather than
// `.execute(...)` for now. If `CurrentThread` is changed to
// implement Executor, change this to `.execute(...).unwrap()`.
|rt, f| {
rt.spawn(f);
},
);
}
}
/*
#[test]
fn hammer_turn() {
use futures::sync::mpsc;
const ITER: usize = 100;
const N: usize = 100;
const THREADS: usize = 4;
for _ in 0..ITER {
let mut ths = vec![];
// Add some jitter
for _ in 0..THREADS {
let th = thread::spawn(|| {
let mut tokio_current_thread = CurrentThread::new();
let (tx, rx) = mpsc::unbounded();
tokio_current_thread.spawn({
let cnt = Rc::new(Cell::new(0));
let c = cnt.clone();
rx.for_each(move |_| {
c.set(1 + c.get());
Ok(())
})
.map_err(|e| panic!("err={:?}", e))
.map(move |v| {
assert_eq!(N, cnt.get());
v
})
});
thread::spawn(move || {
for _ in 0..N {
tx.unbounded_send(()).unwrap();
thread::yield_now();
}
});
while !tokio_current_thread.is_idle() {
tokio_current_thread.turn(None).unwrap();
}
});
ths.push(th);
}
for th in ths {
th.join().unwrap();
}
}
}
*/
#[test]
fn turn_has_polled() {
let mut tokio_current_thread = CurrentThread::new();
// Spawn oneshot receiver
let (sender, receiver) = oneshot::channel::<()>();
tokio_current_thread.spawn(async move {
let _ = receiver.await;
});
// Turn once...
let res = tokio_current_thread
.turn(Some(Duration::from_millis(0)))
.unwrap();
// Should've polled the receiver once, but considered it not ready
assert!(res.has_polled());
// Turn another time
let res = tokio_current_thread
.turn(Some(Duration::from_millis(0)))
.unwrap();
// Should've polled nothing, the receiver is not ready yet
assert!(!res.has_polled());
// Make the receiver ready
sender.send(()).unwrap();
// Turn another time
let res = tokio_current_thread
.turn(Some(Duration::from_millis(0)))
.unwrap();
// Should've polled the receiver, it's ready now
assert!(res.has_polled());
// Now the executor should be empty
assert!(tokio_current_thread.is_idle());
let res = tokio_current_thread
.turn(Some(Duration::from_millis(0)))
.unwrap();
// So should've polled nothing
assert!(!res.has_polled());
}
// Our own mock Park that is never really waiting and the only
// thing it does is to send, on request, something (once) to a oneshot
// channel
struct MyPark {
sender: Option<oneshot::Sender<()>>,
send_now: Rc<Cell<bool>>,
}
struct MyUnpark;
impl tokio_executor::park::Park for MyPark {
type Unpark = MyUnpark;
type Error = ();
fn unpark(&self) -> Self::Unpark {
MyUnpark
}
fn park(&mut self) -> Result<(), Self::Error> {
// If called twice with send_now, this will intentionally panic
if self.send_now.get() {
self.sender.take().unwrap().send(()).unwrap();
}
Ok(())
}
fn park_timeout(&mut self, _duration: Duration) -> Result<(), Self::Error> {
self.park()
}
}
impl tokio_executor::park::Unpark for MyUnpark {
fn unpark(&self) {}
}
#[test]
fn turn_fair() {
let send_now = Rc::new(Cell::new(false));
let (sender, receiver) = oneshot::channel::<()>();
let (sender_2, receiver_2) = oneshot::channel::<()>();
let (sender_3, receiver_3) = oneshot::channel::<()>();
let my_park = MyPark {
sender: Some(sender_3),
send_now: send_now.clone(),
};
let mut tokio_current_thread = CurrentThread::new_with_park(my_park);
let receiver_1_done = Rc::new(Cell::new(false));
let receiver_1_done_clone = receiver_1_done.clone();
    // Once an item is received on the oneshot channel, it will immediately
    // make the second oneshot channel ready
tokio_current_thread.spawn(async move {
receiver.await.unwrap();
sender_2.send(()).unwrap();
receiver_1_done_clone.set(true);
});
let receiver_2_done = Rc::new(Cell::new(false));
let receiver_2_done_clone = receiver_2_done.clone();
tokio_current_thread.spawn(async move {
receiver_2.await.unwrap();
receiver_2_done_clone.set(true);
});
// The third receiver is only woken up from our Park implementation, it simulates
// e.g. a socket that first has to be polled to know if it is ready now
let receiver_3_done = Rc::new(Cell::new(false));
let receiver_3_done_clone = receiver_3_done.clone();
tokio_current_thread.spawn(async move {
receiver_3.await.unwrap();
receiver_3_done_clone.set(true);
});
// First turn should've polled both and considered them not ready
let res = tokio_current_thread
.turn(Some(Duration::from_millis(0)))
.unwrap();
assert!(res.has_polled());
// Next turn should've polled nothing
let res = tokio_current_thread
.turn(Some(Duration::from_millis(0)))
.unwrap();
assert!(!res.has_polled());
assert!(!receiver_1_done.get());
assert!(!receiver_2_done.get());
assert!(!receiver_3_done.get());
// After this the receiver future will wake up the second receiver future,
// so there are pending futures again
sender.send(()).unwrap();
// Now the first receiver should be done, the second receiver should be ready
// to be polled again and the socket not yet
let res = tokio_current_thread.turn(None).unwrap();
assert!(res.has_polled());
assert!(receiver_1_done.get());
assert!(!receiver_2_done.get());
assert!(!receiver_3_done.get());
// Now let our park implementation know that it should send something to sender 3
send_now.set(true);
// This should resolve the second receiver directly, but also poll the socket
// and read the packet from it. If it didn't do both here, we would handle
// futures that are woken up from the reactor and directly unfairly and would
// favour the ones that are woken up directly.
let res = tokio_current_thread.turn(None).unwrap();
assert!(res.has_polled());
assert!(receiver_1_done.get());
assert!(receiver_2_done.get());
assert!(receiver_3_done.get());
// Don't send again
send_now.set(false);
// Now we should be idle and turning should not poll anything
assert!(tokio_current_thread.is_idle());
let res = tokio_current_thread.turn(None).unwrap();
assert!(!res.has_polled());
}
#[test]
fn spawn_from_other_thread() {
let mut current_thread = CurrentThread::new();
let handle = current_thread.handle();
let (sender, receiver) = oneshot::channel::<()>();
thread::spawn(move || {
handle
.spawn(async move {
sender.send(()).unwrap();
})
.unwrap();
});
let _ = current_thread.block_on(receiver).unwrap();
}
#[test]
fn spawn_from_other_thread_unpark() {
use std::sync::mpsc::channel as mpsc_channel;
let mut current_thread = CurrentThread::new();
let handle = current_thread.handle();
let (sender_1, receiver_1) = oneshot::channel::<()>();
let (sender_2, receiver_2) = mpsc_channel::<()>();
thread::spawn(move || {
let _ = receiver_2.recv().unwrap();
handle
.spawn(async move {
sender_1.send(()).unwrap();
})
.unwrap();
});
// Ensure that unparking the executor works correctly. It will first
// check if there are new futures (there are none), then execute the
// lazy future below which will cause the future to be spawned from
// the other thread. Then the executor will park but should be woken
// up because *now* we have a new future to schedule
let _ = current_thread.block_on(async move {
// inlined 'lazy'
async move {
sender_2.send(()).unwrap();
}
.await;
receiver_1.await.unwrap();
});
}
#[test]
fn spawn_from_executor_with_handle() {
let mut current_thread = CurrentThread::new();
let handle = current_thread.handle();
let (tx, rx) = oneshot::channel();
current_thread.spawn(async move {
handle
.spawn(async move {
tx.send(()).unwrap();
})
.unwrap();
});
current_thread.block_on(rx).unwrap();
}
async fn | () {
YieldOnce(false).await
}
struct YieldOnce(bool);
impl Future for YieldOnce {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
if self.0 {
Poll::Ready(())
} else {
self.0 = true;
// Push to the back of the executor's queue
cx.waker().wake_by_ref();
Poll::Pending
}
}
}
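
// Illustrative behavior of the helper above: the first poll of
// `yield_once().await` returns `Poll::Pending` after re-queuing the task
// with `wake_by_ref`, and the second poll returns `Poll::Ready(())`, so
// other tasks on the same executor get a chance to run in between.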
| yield_once |
FollowButton.js | $(function () {
    var initFollowState = Boolean($('.initial-is-followed-by').text()), // initial state of the follow button
followBtn = $('#follow-button'),
followIcon = $('#follow-icon'),
followState = $('#follow-state'),
countFollowings = $('#count-followings'),
countFollowers = $('#count-followers'),
        authorized = Boolean($('.authorized').text()), // login state
        url, // request destination
        reqType; // type of the HTTP method
    // Set the icon to match the initial follow state
    if (initFollowState) {
        followBtn.toggleClass('active');
        followIcon.removeClass("fa-user-plus");
        followIcon.addClass("fa-user-check");
        followState.text('Following');
    } else {
        followIcon.removeClass("fa-user-check");
        followIcon.addClass("fa-user-plus");
        followState.text('Follow');
    }
$.ajaxSetup({
headers: {
'X-CSRF-TOKEN': $('meta[name="token"]').attr('content')
}
}); | event.preventDefault();
if(authorized) {
url = $(this).attr('href');
            // Read the current follow state
if (followBtn.hasClass('active')) {
reqType = 'DELETE';
} else {
reqType = 'PUT';
}
$.ajax({
dataType: "json",
type: reqType,
url: url,
})
            // Handler for a successful request
.done(function(data, status) {
                // Toggle the follow button and icon display
followBtn.toggleClass('active');
followIcon.toggleClass('fa-user-plus');
followIcon.toggleClass('fa-user-check');
                if(followState.text() === 'Following') {
                    followState.text('Follow');
                } else {
                    followState.text('Following');
                }
                // Update the following/follower counts
                countFollowings.text(data.countFollowings + ' Following');
                countFollowers.text(data.countFollowers + ' Followers');
})
            // Handler for a failed request
.fail(function(jqXHR, textStatus, errorThrown) {
console.log("失敗");
console.log("エラー:" + textStatus);
console.log("テキスト:" + jqXHR.responseText);
})
}
});
    // ------------------- Show the modal window ------------------
followBtn.on('click', function(event) {
event.preventDefault();
$('#modal').modal();
});
}); |
followBtn.click(function(event) { |
diagrams.py | from transitions import Transition
from transitions.extensions.markup import MarkupMachine
from transitions.core import listify
import warnings
import logging
from functools import partial
_LOGGER = logging.getLogger(__name__)
_LOGGER.addHandler(logging.NullHandler())
# make deprecation warnings of transition visible for module users
warnings.filterwarnings(action='default', message=r".*transitions version.*")
# this is a workaround for dill issues when partials and super is used in conjunction
# without it, Python 3.0 - 3.3 will not support pickling
# https://github.com/pytransitions/transitions/issues/236
_super = super
class | (Transition):
""" Transition used in conjunction with (Nested)Graphs to update graphs whenever a transition is
conducted.
"""
def _change_state(self, event_data):
graph = event_data.machine.model_graphs[event_data.model]
graph.reset_styling()
graph.set_previous_transition(self.source, self.dest)
_super(TransitionGraphSupport, self)._change_state(event_data) # pylint: disable=protected-access
class GraphMachine(MarkupMachine):
""" Extends transitions.core.Machine with graph support.
Is also used as a mixin for HierarchicalMachine.
Attributes:
_pickle_blacklist (list): Objects that should not/do not need to be pickled.
transition_cls (cls): TransitionGraphSupport
"""
_pickle_blacklist = ['model_graphs']
transition_cls = TransitionGraphSupport
machine_attributes = {
'directed': 'true',
'strict': 'false',
'rankdir': 'LR',
}
hierarchical_machine_attributes = {
'rankdir': 'TB',
'rank': 'source',
'nodesep': '1.5',
'compound': 'true'
}
style_attributes = {
'node': {
'': {},
'default': {
'shape': 'rectangle',
'style': 'rounded, filled',
'fillcolor': 'white',
'color': 'black',
'peripheries': '1'
},
'active': {
'color': 'red',
'fillcolor': 'darksalmon',
'peripheries': '2'
},
'previous': {
'color': 'blue',
'fillcolor': 'azure2',
'peripheries': '1'
}
},
'edge': {
'': {},
'default': {
'color': 'black'
},
'previous': {
'color': 'blue'
}
},
'graph': {
'': {},
'default': {
'color': 'black',
'fillcolor': 'white'
},
'previous': {
'color': 'blue',
'fillcolor': 'azure2',
'style': 'filled'
},
'active': {
'color': 'red',
'fillcolor': 'darksalmon',
'style': 'filled'
},
}
}
# model_graphs cannot be pickled. Omit them.
def __getstate__(self):
# self.pkl_graphs = [(g.markup, g.custom_styles) for g in self.model_graphs]
return {k: v for k, v in self.__dict__.items() if k not in self._pickle_blacklist}
def __setstate__(self, state):
self.__dict__.update(state)
self.model_graphs = {} # reinitialize new model_graphs
for model in self.models:
try:
_ = self._get_graph(model, title=self.title)
except AttributeError as e:
_LOGGER.warning("Graph for model could not be initialized after pickling: %s", e)
def __init__(self, *args, **kwargs):
# remove graph config from keywords
self.title = kwargs.pop('title', 'State Machine')
self.show_conditions = kwargs.pop('show_conditions', False)
self.show_state_attributes = kwargs.pop('show_state_attributes', False)
# in MarkupMachine this switch is called 'with_auto_transitions'
# keep 'auto_transitions_markup' for backwards compatibility
kwargs['auto_transitions_markup'] = kwargs.get('auto_transitions_markup', False) or \
kwargs.pop('show_auto_transitions', False)
self.model_graphs = {}
# determine graph engine; if pygraphviz cannot be imported, fall back to graphviz
use_pygraphviz = kwargs.pop('use_pygraphviz', True)
if use_pygraphviz:
try:
import pygraphviz
except ImportError:
use_pygraphviz = False
self.graph_cls = self._init_graphviz_engine(use_pygraphviz)
_LOGGER.debug("Using graph engine %s", self.graph_cls)
_super(GraphMachine, self).__init__(*args, **kwargs)
# for backwards compatibility assign get_combined_graph to get_graph
# if model is not the machine
if not hasattr(self, 'get_graph'):
setattr(self, 'get_graph', self.get_combined_graph)
def _init_graphviz_engine(self, use_pygraphviz):
if use_pygraphviz:
try:
if hasattr(self.state_cls, 'separator'):
from .diagrams_pygraphviz import NestedGraph as Graph
self.machine_attributes.update(self.hierarchical_machine_attributes)
else:
from .diagrams_pygraphviz import Graph
return Graph
except ImportError:
pass
if hasattr(self.state_cls, 'separator'):
from .diagrams_graphviz import NestedGraph as Graph
self.machine_attributes.update(self.hierarchical_machine_attributes)
else:
from .diagrams_graphviz import Graph
return Graph
def _get_graph(self, model, title=None, force_new=False, show_roi=False):
if force_new:
grph = self.graph_cls(self, title=title if title is not None else self.title)
self.model_graphs[model] = grph
try:
self.model_graphs[model].set_node_style(getattr(model, self.model_attribute), 'active')
except AttributeError:
_LOGGER.info("Could not set active state of diagram")
try:
m = self.model_graphs[model]
except KeyError:
_ = self._get_graph(model, title, force_new=True)
m = self.model_graphs[model]
m.roi_state = getattr(model, self.model_attribute) if show_roi else None
return m.get_graph(title=title)
def get_combined_graph(self, title=None, force_new=False, show_roi=False):
""" This method is currently equivalent to 'get_graph' of the first machine's model.
In future releases of transitions, this function will return a combined graph with active states
of all models.
Args:
title (str): Title of the resulting graph.
force_new (bool): If set to True, (re-)generate the model's graph.
show_roi (bool): If set to True, only render states that are active and/or can be reached from
the current state.
Returns: AGraph of the first machine's model.
"""
_LOGGER.info('Returning graph of the first model. In future releases, this '
'method will return a combined graph of all models.')
return self._get_graph(self.models[0], title, force_new, show_roi)
def add_model(self, model, initial=None):
models = listify(model)
super(GraphMachine, self).add_model(models, initial)
for mod in models:
mod = self if mod == 'self' else mod
if hasattr(mod, 'get_graph'):
raise AttributeError('Model already has a get_graph attribute. Graph retrieval cannot be bound.')
setattr(mod, 'get_graph', partial(self._get_graph, mod))
_ = mod.get_graph(title=self.title, force_new=True) # initialises graph
def add_states(self, states, on_enter=None, on_exit=None,
ignore_invalid_triggers=None, **kwargs):
""" Calls the base method and regenerates all models's graphs. """
_super(GraphMachine, self).add_states(states, on_enter=on_enter, on_exit=on_exit,
ignore_invalid_triggers=ignore_invalid_triggers, **kwargs)
for model in self.models:
model.get_graph(force_new=True)
def add_transition(self, trigger, source, dest, conditions=None,
unless=None, before=None, after=None, prepare=None, **kwargs):
""" Calls the base method and regenerates all models's graphs. """
_super(GraphMachine, self).add_transition(trigger, source, dest, conditions=conditions, unless=unless,
before=before, after=after, prepare=prepare, **kwargs)
for model in self.models:
model.get_graph(force_new=True)
class BaseGraph(object):
def __init__(self, machine, title=None):
self.machine = machine
self.fsm_graph = None
self.roi_state = None
self.generate(title)
def _convert_state_attributes(self, state):
label = state.get('label', state['name'])
if self.machine.show_state_attributes:
if 'tags' in state:
label += ' [' + ', '.join(state['tags']) + ']'
if 'on_enter' in state:
label += r'\l- enter:\l + ' + r'\l + '.join(state['on_enter'])
if 'on_exit' in state:
label += r'\l- exit:\l + ' + r'\l + '.join(state['on_exit'])
if 'timeout' in state:
label += r'\l- timeout(' + state['timeout'] + 's) -> (' + ', '.join(state['on_timeout']) + ')'
return label
def _transition_label(self, tran):
edge_label = tran.get('label', tran['trigger'])
if 'dest' not in tran:
edge_label += " [internal]"
if self.machine.show_conditions and any(prop in tran for prop in ['conditions', 'unless']):
x = '{edge_label} [{conditions}]'.format(
edge_label=edge_label,
conditions=' & '.join(tran.get('conditions', []) + ['!' + u for u in tran.get('unless', [])]),
)
return x
return edge_label
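
    # Illustrative output (hypothetical transition): with show_conditions
    # enabled, a trigger 'advance' guarded by condition 'is_valid' and
    # unless 'is_locked' is rendered by the method above as
    # "advance [is_valid & !is_locked]".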
def _get_global_name(self, path):
if path:
state = path.pop(0)
with self.machine(state):
return self._get_global_name(path)
else:
return self.machine.get_global_name()
| TransitionGraphSupport |
plan_expression_monotonicity.rs | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_datavalues::prelude::DataColumn;
use common_datavalues::prelude::DataColumnWithField;
use common_datavalues::DataField;
use common_datavalues::DataSchemaRefExt;
use common_datavalues::DataType;
use common_datavalues::DataValue;
use common_exception::Result;
use common_functions::scalars::Monotonicity;
use common_planners::*;
struct Test {
name: &'static str,
expr: Expression,
column: &'static str,
left: Option<DataColumnWithField>,
right: Option<DataColumnWithField>,
expect_mono: Monotonicity,
error: &'static str,
}
fn create_f64(d: f64) -> Option<DataColumnWithField> {
let data_field = DataField::new("x", DataType::Float64, false);
let data_column = DataColumn::Constant(DataValue::Float64(Some(d)), 1);
Some(DataColumnWithField::new(data_column, data_field))
}
fn create_u8(d: u8) -> Option<DataColumnWithField> {
let data_field = DataField::new("x", DataType::UInt8, false);
let data_column = DataColumn::Constant(DataValue::UInt8(Some(d)), 1);
Some(DataColumnWithField::new(data_column, data_field))
}
fn create_datetime(d: u32) -> Option<DataColumnWithField> {
let data_field = DataField::new("x", DataType::DateTime32(None), false);
let data_column = DataColumn::Constant(DataValue::UInt32(Some(d)), 1);
Some(DataColumnWithField::new(data_column, data_field))
}
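
// Illustrative: create_f64(5.0) wraps the constant 5.0 as a single-row
// Float64 column with field name "x"; the tests below pass such values as
// the left/right range bounds that check_expression evaluates against.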
fn verify_test(t: Test) -> Result<()> {
let schema = DataSchemaRefExt::create(vec![
DataField::new("x", DataType::Float64, false),
DataField::new("y", DataType::Int64, false),
DataField::new("z", DataType::DateTime32(None), false),
]);
let mono = match ExpressionMonotonicityVisitor::check_expression(
schema, &t.expr, t.left, t.right, t.column,
) {
Ok(mono) => mono,
Err(e) => {
assert_eq!(t.error, e.to_string(), "{}", t.name);
return Ok(());
}
};
assert_eq!(
mono.is_monotonic, t.expect_mono.is_monotonic,
"{} is_monotonic",
t.name
);
assert_eq!(
mono.is_constant, t.expect_mono.is_constant,
"{} is_constant",
t.name
);
if t.expect_mono.is_monotonic {
assert_eq!(
mono.is_positive, t.expect_mono.is_positive,
"{} is_positive",
t.name
);
}
if t.expect_mono.is_monotonic || t.expect_mono.is_constant {
let left = mono.left;
let right = mono.right;
let expected_left = t.expect_mono.left;
let expected_right = t.expect_mono.right;
if expected_left.is_none() {
assert!(left.is_none(), "{} left", t.name);
} else {
let left_val = left.unwrap().column().try_get(0)?;
let expected_left_val = expected_left.unwrap().column().try_get(0)?;
assert!(left_val == expected_left_val, "{}", t.name);
}
if expected_right.is_none() {
assert!(right.is_none(), "{} right", t.name);
} else {
let right_val = right.unwrap().column().try_get(0)?;
let expected_right_val = expected_right.unwrap().column().try_get(0)?;
assert!(right_val == expected_right_val, "{}", t.name);
}
}
Ok(())
}
#[test]
fn test_arithmetic_plus_minus() -> Result<()> {
let test_suite = vec![
Test {
name: "f(x) = x + 12",
expr: Expression::create_binary_expression("+", vec![col("x"), lit(12i32)]),
column: "x",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = -x + 12",
expr: Expression::create_binary_expression("+", vec![
Expression::create_unary_expression("negate", vec![col("x")]),
lit(12i32),
]),
column: "x",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: false,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x,y) = x + y", // multi-variable function is not supported,
expr: Expression::create_binary_expression("+", vec![col("x"), col("y")]),
column: "",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: false,
is_positive: true,
is_constant: false,
left: None,
right: None,
},
error: "Code: 6, displayText = Multi-column expressions are not currently supported.",
},
Test {
name: "f(x) = (-x + 12) - x + (1 - x)",
expr: Expression::create_binary_expression("+", vec![
Expression::create_binary_expression("-", vec![
// -x + 12
Expression::create_binary_expression("+", vec![
Expression::create_unary_expression("negate", vec![col("x")]),
lit(12i32),
]),
col("x"),
]),
                // 1 - x
                Expression::create_binary_expression("-", vec![lit(1i64), col("x")]),
]),
column: "x",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: false,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = (x + 12) - x + (1 - x)",
expr: Expression::create_binary_expression("+", vec![
Expression::create_binary_expression("-", vec![
// x + 12
Expression::create_binary_expression("+", vec![col("x"), lit(12i32)]),
col("x"),
]),
                // 1 - x
                Expression::create_binary_expression("-", vec![lit(1i64), col("x")]),
]),
column: "x",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: false,
is_positive: false,
is_constant: false,
left: None,
right: None,
},
error: "",
},
];
for t in test_suite.into_iter() {
verify_test(t)?;
}
Ok(())
}
#[test]
fn test_arithmetic_mul_div() -> Result<()> {
let test_suite = vec![
Test {
name: "f(x) = -5 * x",
expr: Expression::create_binary_expression("*", vec![lit(-5_i8), col("x")]),
column: "x",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: false,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = -1/x",
expr: Expression::create_binary_expression("/", vec![lit(-1_i8), col("x")]),
column: "x",
left: create_f64(5.0),
right: create_f64(10.0),
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: create_f64(-0.2),
right: create_f64(-0.1),
},
error: "",
},
Test {
name: "f(x) = x/10",
expr: Expression::create_binary_expression("/", vec![col("x"), lit(10_i8)]),
column: "x",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = x * (x-12) where x in [10-1000]",
expr: Expression::create_binary_expression("*", vec![
col("x"),
Expression::create_binary_expression("-", vec![col("x"), lit(12_i64)]),
]),
column: "x",
left: create_f64(10.0),
right: create_f64(1000.0),
expect_mono: Monotonicity {
is_monotonic: false,
is_positive: false,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = x * (x-12) where x in [12, 100]",
expr: Expression::create_binary_expression("*", vec![
col("x"),
Expression::create_binary_expression("-", vec![col("x"), lit(12_i64)]),
]),
column: "x",
left: create_f64(12.0),
right: create_f64(100.0),
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: create_f64(0.0),
right: create_f64(8800.0),
},
error: "",
},
Test {
name: "f(x) = x/(1/x) where x >= 1",
expr: Expression::create_binary_expression("/", vec![
col("x"),
Expression::create_binary_expression("/", vec![lit(1_i8), col("x")]),
]),
column: "x",
left: create_f64(1.0),
right: create_f64(2.0),
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: create_f64(1.0),
right: create_f64(4.0),
},
error: "",
},
Test {
name: "f(x) = -x/(2/(x-2)) where x in [0-10]",
expr: Expression::create_binary_expression("/", vec![
Expression::create_unary_expression("negate", vec![col("x")]),
Expression::create_binary_expression("/", vec![
lit(2_i8),
Expression::create_binary_expression("-", vec![col("x"), lit(2_i8)]),
]),
]),
column: "x",
left: create_f64(0.0),
right: create_f64(10.0),
expect_mono: Monotonicity {
is_monotonic: false,
is_positive: false,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = -x/(2/(x-2)) where x in [4-10]",
expr: Expression::create_binary_expression("/", vec![
Expression::create_unary_expression("negate", vec![col("x")]),
Expression::create_binary_expression("/", vec![
lit(2_i8),
Expression::create_binary_expression("-", vec![col("x"), lit(2_i8)]),
]),
]),
column: "x",
left: create_f64(4.0),
right: create_f64(10.0),
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: false,
is_constant: false,
left: create_f64(-4.0),
right: create_f64(-40.0),
},
error: "",
},
];
for t in test_suite.into_iter() {
verify_test(t)?;
}
Ok(())
}
#[test]
fn test_abs_function() -> Result<()> {
let test_suite = vec![
Test {
name: "f(x) = abs(x + 12)",
expr: Expression::create_scalar_function("abs", vec![
Expression::create_binary_expression("+", vec![col("x"), lit(12i32)]),
]),
column: "x",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: false,
is_positive: true,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = abs(x) where 0 <= x <= 10",
expr: Expression::create_scalar_function("abs", vec![col("x")]),
column: "x",
left: create_f64(0.0),
right: create_f64(10.0),
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: create_f64(0.0),
right: create_f64(10.0),
},
error: "",
},
Test {
name: "f(x) = abs(x) where -10 <= x <= -2",
expr: Expression::create_scalar_function("abs", vec![col("x")]),
column: "x",
left: create_f64(-10.0),
right: create_f64(-2.0),
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: false,
is_constant: false,
left: create_f64(10.0),
right: create_f64(2.0),
},
error: "",
},
Test {
name: "f(x) = abs(x) where -5 <= x <= 5", // should NOT be monotonic
expr: Expression::create_scalar_function("abs", vec![col("x")]),
column: "x",
left: create_f64(-5.0),
right: create_f64(5.0),
expect_mono: Monotonicity {
is_monotonic: false,
is_positive: false,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = abs(x + 12) where -12 <= x <= 1000",
expr: Expression::create_scalar_function("abs", vec![
Expression::create_binary_expression("+", vec![col("x"), lit(12i32)]),
]),
column: "x",
left: create_f64(-12.0),
right: create_f64(1000.0),
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: create_f64(0.0),
right: create_f64(1012.0),
},
error: "",
},
Test {
name: "f(x) = abs(x + 12) where -14 <= x <= 20", // should NOT be monotonic
expr: Expression::create_scalar_function("abs", vec![
Expression::create_binary_expression("+", vec![col("x"), lit(12i32)]),
]),
column: "x",
left: create_f64(-14.0),
right: create_f64(20.0),
expect_mono: Monotonicity {
is_monotonic: false,
is_positive: true,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = abs( (x - 7) + (x - 3) ) where 5 <= x <= 100",
expr: Expression::create_scalar_function("abs", vec![
Expression::create_binary_expression("+", vec![
Expression::create_binary_expression("-", vec![col("x"), lit(7_i32)]),
Expression::create_binary_expression("-", vec![col("x"), lit(3_i32)]),
]),
]),
column: "x",
left: create_f64(5.0),
right: create_f64(100.0),
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: create_f64(0.0),
right: create_f64(190.0),
},
error: "",
},
Test {
name: "f(x) = abs( (-x + 8) - x) where -100 <= x <= 4",
expr: Expression::create_scalar_function("abs", vec![
Expression::create_binary_expression("-", vec![
Expression::create_binary_expression("+", vec![
Expression::create_unary_expression("negate", vec![col("x")]),
lit(8_i64),
]),
col("x"),
]),
]),
column: "x",
left: create_f64(-100.0),
right: create_f64(4.0),
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: false,
is_constant: false,
left: create_f64(208.0),
right: create_f64(0.0),
},
error: "",
},
];
for t in test_suite.into_iter() {
verify_test(t)?;
}
Ok(())
}
#[test]
fn test_dates_function() -> Result<()> {
let test_suite = vec![
Test {
name: "f(x) = toStartOfWeek(x+12)",
expr: Expression::create_scalar_function("toStartOfWeek", vec![
Expression::create_binary_expression("+", vec![col("x"), lit(12i32)]),
]),
column: "x",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = toMonday(x)",
expr: Expression::create_scalar_function("toMonday", vec![col("x")]),
column: "x",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(x) = toSecond(x)",
expr: Expression::create_scalar_function("toSecond", vec![col("x")]), | right: None,
expect_mono: Monotonicity {
is_monotonic: false,
is_positive: true,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(z) = toSecond(z)",
expr: Expression::create_scalar_function("toSecond", vec![col("z")]),
column: "z",
left: create_datetime(1638288000),
right: create_datetime(1638288059),
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: create_u8(0),
right: create_u8(59),
},
error: "",
},
Test {
name: "f(z) = toDayOfYear(z)",
expr: Expression::create_scalar_function("toDayOfYear", vec![col("z")]),
column: "z",
left: create_datetime(1606752119),
right: create_datetime(1638288059),
expect_mono: Monotonicity {
is_monotonic: false,
is_positive: true,
is_constant: false,
left: None,
right: None,
},
error: "",
},
Test {
name: "f(z) = toStartOfHour(z)",
expr: Expression::create_scalar_function("toStartOfHour", vec![col("z")]),
column: "z",
left: None,
right: None,
expect_mono: Monotonicity {
is_monotonic: true,
is_positive: true,
is_constant: false,
left: None,
right: None,
},
error: "",
},
];
for t in test_suite.into_iter() {
verify_test(t)?;
}
Ok(())
} | column: "x",
left: None, |
v1_matrix_kind.go | // Copyright 2018-2021 Polyaxon, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package service_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/validate"
)
// V1MatrixKind v1 matrix kind
//
// swagger:model v1MatrixKind
type V1MatrixKind string
const (
// V1MatrixKindRandom captures enum value "random"
V1MatrixKindRandom V1MatrixKind = "random"
// V1MatrixKindGrid captures enum value "grid"
V1MatrixKindGrid V1MatrixKind = "grid" | V1MatrixKindHyperband V1MatrixKind = "hyperband"
// V1MatrixKindBayes captures enum value "bayes"
V1MatrixKindBayes V1MatrixKind = "bayes"
// V1MatrixKindHyperopt captures enum value "hyperopt"
V1MatrixKindHyperopt V1MatrixKind = "hyperopt"
// V1MatrixKindIterative captures enum value "iterative"
V1MatrixKindIterative V1MatrixKind = "iterative"
// V1MatrixKindMapping captures enum value "mapping"
V1MatrixKindMapping V1MatrixKind = "mapping"
)
// for schema
var v1MatrixKindEnum []interface{}
func init() {
var res []V1MatrixKind
if err := json.Unmarshal([]byte(`["random","grid","hyperband","bayes","hyperopt","iterative","mapping"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
v1MatrixKindEnum = append(v1MatrixKindEnum, v)
}
}
func (m V1MatrixKind) validateV1MatrixKindEnum(path, location string, value V1MatrixKind) error {
if err := validate.EnumCase(path, location, value, v1MatrixKindEnum, true); err != nil {
return err
}
return nil
}
// Validate validates this v1 matrix kind
func (m V1MatrixKind) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateV1MatrixKindEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// ContextValidate validates this v1 matrix kind based on context it is used
func (m V1MatrixKind) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
return nil
} |
// V1MatrixKindHyperband captures enum value "hyperband" |
nanomodal.js | var nanoModal;
(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);throw new Error("Cannot find module '"+o+"'")}var f=n[o]={exports:{}};t[o][0].call(f.exports,function(e){var n=t[o][1][e];return s(n?n:e)},f,f.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
var ModalEvent = require("./ModalEvent");
function El(tag, classNames) {
var doc = document;
var el = (tag.nodeType || tag === window) ? tag : doc.createElement(tag);
var eventHandlers = [];
if (classNames) {
el.className = classNames;
}
var onShowEvent = ModalEvent();
var onHideEvent = ModalEvent();
var addListener = function(event, handler) {
if (el.addEventListener) {
el.addEventListener(event, handler, false);
} else {
el.attachEvent("on" + event, handler);
}
eventHandlers.push({
event: event,
handler: handler
});
};
var removeListener = function(event, handler) {
if (el.removeEventListener) {
el.removeEventListener(event, handler);
} else {
el.detachEvent("on" + event, handler);
}
var t = eventHandlers.length;
var handlerObj;
while (t-- > 0) {
handlerObj = eventHandlers[t];
if (handlerObj.event === event && handlerObj.handler === handler) {
eventHandlers.splice(t, 1);
break;
}
}
};
var addClickListener = function(handler) {
var throttle = false;
var throttleHandler = function(e) {
if (!throttle) {
throttle = true;
setTimeout(function() {
throttle = false;
}, 100);
handler(e);
}
};
addListener("touchstart", throttleHandler);
addListener("mousedown", throttleHandler);
};
var show = function(arg) {
if (el) {
el.style.display = "block";
onShowEvent.fire(arg);
}
};
var hide = function(arg) {
if (el) {
el.style.display = "none";
onHideEvent.fire(arg);
}
};
var isShowing = function() {
return el.style && el.style.display === "block";
};
var html = function(html) {
if (el) {
el.innerHTML = html;
}
};
var text = function(text) {
if (el) {
html("");
el.appendChild(doc.createTextNode(text));
}
};
var remove = function() {
if (el.parentNode) {
var x = eventHandlers.length;
var eventHandler;
while (x-- > 0) {
eventHandler = eventHandlers[x];
removeListener(eventHandler.event, eventHandler.handler);
}
el.parentNode.removeChild(el);
onShowEvent.removeAllListeners();
onHideEvent.removeAllListeners();
}
};
var add = function(elObject) {
var elementToAppend = elObject.el || elObject;
el.appendChild(elementToAppend);
};
return {
el: el,
addListener: addListener,
addClickListener: addClickListener,
onShowEvent: onShowEvent,
onHideEvent: onHideEvent,
show: show,
hide: hide,
isShowing: isShowing,
html: html,
text: text,
remove: remove,
add: add
};
}
module.exports = El;
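
// A minimal usage sketch of El (names illustrative):
//   var box = El("div", "nanoModalContent");
//   box.text("hello");   // replaces the element's content with a text node
//   box.show();          // sets display:block and fires onShowEvent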
},{"./ModalEvent":3}],2:[function(require,module,exports){
var El = require("./El");
function Modal(content, options, overlay, customShow, customHide) {
if (content === undefined) {
return;
}
options = options || {};
var modal = El("div", "nanoModal nanoModalOverride " + (options.classes || ""));
var contentContainer = El("div", "nanoModalContent");
var buttonArea = El("div", "nanoModalButtons");
var onRequestHideListenerId;
modal.add(contentContainer);
modal.add(buttonArea);
modal.el.style.display = "none";
var buttons = [];
var pub;
options.buttons = options.buttons || [{
text: "Close",
handler: "hide",
primary: true
}];
var removeButtons = function() {
var t = buttons.length;
while (t-- > 0) {
var button = buttons[t];
button.remove();
}
buttons = [];
};
var center = function() {
modal.el.style.marginLeft = -modal.el.clientWidth / 2 + "px";
};
var anyModalsOpen = function() {
var modals = document.querySelectorAll(".nanoModal");
var t = modals.length;
while (t-- > 0) {
if (modals[t].style.display !== "none") {
return true;
}
}
return false;
};
var defaultShow = function() {
if (!modal.isShowing()) {
// Call the static method from the Modal module.
Modal.resizeOverlay();
overlay.show(overlay);
modal.show(pub);
center();
}
};
var defaultHide = function() {
if (modal.isShowing()) {
modal.hide(pub);
if (!anyModalsOpen()) {
overlay.hide(overlay);
}
if (options.autoRemove) {
pub.remove();
}
}
};
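// Shallow copy: own enumerable properties only; nested objects stay shared.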
var quickClone = function(obj) {
var newObj = {};
for (var key in obj) {
if (obj.hasOwnProperty(key)) {
newObj[key] = obj[key];
}
}
return newObj;
};
pub = {
modal: modal,
overlay: overlay,
show: function() {
if (customShow) {
customShow(defaultShow, pub);
} else {
defaultShow();
}
return pub;
},
hide: function() {
if (customHide) {
customHide(defaultHide, pub);
} else {
defaultHide();
}
return pub;
},
onShow: function(callback) {
modal.onShowEvent.addListener(function() {
callback(pub);
});
return pub;
},
onHide: function(callback) {
modal.onHideEvent.addListener(function() {
callback(pub);
});
return pub;
},
remove: function() {
overlay.onRequestHide.removeListener(onRequestHideListenerId);
onRequestHideListenerId = null;
removeButtons();
modal.remove();
},
setButtons: function(buttonList) {
var btnIdx = buttonList.length;
var btnObj;
var btnEl;
var classes;
var giveButtonCustomClickListener = function(btnEl, btnObj) {
var pubCopy = quickClone(pub);
btnEl.addClickListener(function(e) {
pubCopy.event = e || window.event;
btnObj.handler(pubCopy);
});
};
removeButtons();
if (btnIdx === 0) { | while (btnIdx-- > 0) {
btnObj = buttonList[btnIdx];
classes = "nanoModalBtn";
if (btnObj.primary) {
classes += " nanoModalBtnPrimary";
}
classes += btnObj.classes ? " " + btnObj.classes : "";
btnEl = El("button", classes);
if (btnObj.handler === "hide") {
btnEl.addClickListener(pub.hide);
} else if (btnObj.handler) {
giveButtonCustomClickListener(btnEl, btnObj);
}
btnEl.text(btnObj.text);
buttonArea.add(btnEl);
buttons.push(btnEl);
}
}
center();
return pub;
},
setContent: function(newContent) {
// Only good way of checking if this is a node in IE8...
if (newContent.nodeType) {
contentContainer.html("");
contentContainer.add(newContent);
} else {
contentContainer.html(newContent);
}
center();
content = newContent;
return pub;
},
getContent: function() {
return content;
}
};
onRequestHideListenerId = overlay.onRequestHide.addListener(function() {
if (options.overlayClose !== false && modal.isShowing()) {
pub.hide();
}
});
pub.setContent(content).setButtons(options.buttons);
document.body.appendChild(modal.el);
return pub;
}
var doc = document;
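// Takes "Width" or "Height" and returns the largest of the body/documentElement
// scroll, offset and client sizes, so the overlay can cover the whole page.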
var getDocumentDim = function(name) {
var docE = doc.documentElement;
var scroll = "scroll" + name;
var offset = "offset" + name;
return Math.max(doc.body[scroll], docE[scroll],
doc.body[offset], docE[offset], docE["client" + name]);
};
// Make this a static function so that main.js can call it from its window
// resize handler; Modal.js needs this function as well.
Modal.resizeOverlay = function() {
var overlay = doc.getElementById("nanoModalOverlay");
overlay.style.width = getDocumentDim("Width") + "px";
overlay.style.height = getDocumentDim("Height") + "px";
};
module.exports = Modal;
},{"./El":1}],3:[function(require,module,exports){
function ModalEvent() {
var listeners = {};
var nextListenerId = 0;
var addListener = function(callback) {
listeners[nextListenerId] = callback;
return nextListenerId++;
};
var removeListener = function(id) {
if (id) {
delete listeners[id];
}
};
var removeAllListeners = function() {
listeners = {};
};
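// Invoke every still-registered listener, in registration order, with the
// supplied arguments; removed ids are skipped.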
var fire = function() {
for (var x = 0, num = nextListenerId; x < num; ++x) {
if (listeners[x]) {
listeners[x].apply(null, arguments);
}
}
};
return {
addListener: addListener,
removeListener: removeListener,
removeAllListeners: removeAllListeners,
fire: fire
};
}
module.exports = ModalEvent;
},{}],4:[function(require,module,exports){
var ModalEvent = require("./ModalEvent");
var nanoModalAPI = (function() {
var El = require("./El");
var Modal = require("./Modal");
var overlay;
var doc = document;
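// Injects the library stylesheet and the shared overlay element the first time
// it runs; subsequent calls are no-ops thanks to the querySelector guard.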
function init() {
if (!doc.querySelector("#nanoModalOverlay")) {
// Put the main styles on the page.
var styleObj = El("style");
var style = styleObj.el;
var firstElInHead = doc.querySelectorAll("head")[0].childNodes[0];
firstElInHead.parentNode.insertBefore(style, firstElInHead);
var styleText = ".nanoModal{position:absolute;top:100px;left:50%;display:none;z-index:9999;min-width:300px;padding:15px 20px 10px;-webkit-border-radius:10px;-moz-border-radius:10px;border-radius:10px;background:#fff;background:-moz-linear-gradient(top,#fff 0,#ddd 100%);background:-webkit-gradient(linear,left top,left bottom,color-stop(0%,#fff),color-stop(100%,#ddd));background:-webkit-linear-gradient(top,#fff 0,#ddd 100%);background:-o-linear-gradient(top,#fff 0,#ddd 100%);background:-ms-linear-gradient(top,#fff 0,#ddd 100%);background:linear-gradient(to bottom,#fff 0,#ddd 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffff', endColorstr='#dddddd', GradientType=0)}.nanoModalOverlay{position:absolute;top:0;left:0;width:100%;height:100%;z-index:9998;background:#000;display:none;-ms-filter:\"alpha(Opacity=50)\";-moz-opacity:.5;-khtml-opacity:.5;opacity:.5}.nanoModalButtons{border-top:1px solid #ddd;margin-top:15px;text-align:right}.nanoModalBtn{color:#333;background-color:#fff;display:inline-block;padding:6px 12px;margin:8px 4px 0;font-size:14px;text-align:center;white-space:nowrap;vertical-align:middle;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border:1px solid transparent;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.nanoModalBtn:active,.nanoModalBtn:focus,.nanoModalBtn:hover{color:#333;background-color:#e6e6e6;border-color:#adadad}.nanoModalBtn.nanoModalBtnPrimary{color:#fff;background-color:#428bca;border-color:#357ebd}.nanoModalBtn.nanoModalBtnPrimary:active,.nanoModalBtn.nanoModalBtnPrimary:focus,.nanoModalBtn.nanoModalBtnPrimary:hover{color:#fff;background-color:#3071a9;border-color:#285e8e}";
if (style.styleSheet) {
style.styleSheet.cssText = styleText;
} else {
styleObj.text(styleText);
}
// Make the overlay and put it on the page.
overlay = El("div", "nanoModalOverlay nanoModalOverride");
overlay.el.id = "nanoModalOverlay";
doc.body.appendChild(overlay.el);
// Add an event so that the modals can hook into it to close.
overlay.onRequestHide = ModalEvent();
var overlayCloseFunc = function() {
overlay.onRequestHide.fire();
};
overlay.addClickListener(overlayCloseFunc);
El(doc).addListener("keydown", function(e) {
var keyCode = e.which || e.keyCode;
if (keyCode === 27) { // 27 is Escape
overlayCloseFunc();
}
});
var windowEl = El(window);
var resizeOverlayTimeout;
windowEl.addListener("resize", function() {
if (resizeOverlayTimeout) {
clearTimeout(resizeOverlayTimeout);
}
resizeOverlayTimeout = setTimeout(Modal.resizeOverlay, 100);
});
// Make SURE we have the correct dimensions so we make the overlay the right size.
// Some devices fire the event before the document is ready to return the new dimensions.
windowEl.addListener("orientationchange", function() {
for (var t = 0; t < 3; ++t) {
setTimeout(Modal.resizeOverlay, 1000 * t + 200);
}
});
}
}
if (document.body) {
init();
}
var api = function(content, options) {
init();
return Modal(content, options, overlay, api.customShow, api.customHide);
};
api.resizeOverlay = Modal.resizeOverlay;
return api;
})();
// Expose the api to a var outside browserify so that we can export a module correctly.
nanoModal = nanoModalAPI;
},{"./El":1,"./Modal":2,"./ModalEvent":3}]},{},[1,2,3,4]);
if (typeof window !== "undefined") {
if (typeof window.define === "function" && window.define.amd) {
window.define(function() {
return nanoModal;
});
}
window.nanoModal = nanoModal;
}
if (typeof module !== "undefined") {
module.exports = nanoModal;
} | buttonArea.hide();
} else {
buttonArea.show(); |
packets.ping.go | package sp2p
type pingReq struct{}
func (t *pingReq) T() byte { return pingReqT }
func (t *pingReq) String() string { return pingReqS }
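// OnHandle parses the sender node out of the incoming message and refreshes it via UpdateNode.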
func (t *pingReq) OnHandle(p ISP2P, msg *KMsg) {
node, err := nodeFromKMsg(msg)
if err != nil {
getLog().Error("NodeFromKMsg error", "err", err)
return
}
p.UpdateNode(node.string()) | } |
|
main.rs | async fn say_world() { |
#[tokio::main]
async fn main() {
// Calling `say_world()` does not execute the body of `say_world()`.
let op = say_world();
// This println! comes first
println!("hello");
// Calling `.await` on `op` starts executing `say_world`.
op.await;
} | println!("world");
} |
mod.rs | #[cfg(feature = "ipc-transport")]
mod ipc;
#[cfg(feature = "tcp-transport")]
mod tcp;
use crate::codec::FramedIo;
use crate::endpoint::Endpoint;
use crate::task_handle::TaskHandle;
use crate::ZmqResult;
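// Evaluates $body when the named cargo feature is enabled; otherwise panics at
// runtime with a message naming the missing feature.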
macro_rules! do_if_enabled {
($feature:literal, $body:expr) => {{
#[cfg(feature = $feature)]
{
$body
}
#[cfg(not(feature = $feature))]
panic!("feature \"{}\" is not enabled", $feature)
}};
}
/// Connects to the given endpoint
///
/// # Panics
/// Panics if the requested endpoint uses a transport type that isn't enabled
pub(crate) async fn connect(endpoint: &Endpoint) -> ZmqResult<(FramedIo, Endpoint)> {
match endpoint {
Endpoint::Tcp(_host, _port) => {
do_if_enabled!("tcp-transport", tcp::connect(_host, *_port).await)
}
Endpoint::Ipc(_path) => do_if_enabled!(
"ipc-transport",
if let Some(path) = _path {
ipc::connect(path).await
} else {
Err(crate::error::ZmqError::Socket(
"Cannot connect to an unnamed ipc socket",
))
}
),
}
}
pub struct AcceptStopHandle(pub(crate) TaskHandle<()>);
/// Spawns an async task that listens for connections at the provided endpoint.
///
/// `cback` is invoked for each accepted connection. On `Ok`, it receives a
/// tuple containing the framed raw socket and the endpoint of the accepted
/// remote connection.
///
/// Returns a `ZmqResult` which, on `Ok`, holds a tuple of the resolved bound
/// endpoint and a handle that stops the async accept task.
///
/// # Panics
/// Panics if the requested endpoint uses a transport type that isn't enabled
pub(crate) async fn | <T>(
endpoint: Endpoint,
cback: impl Fn(ZmqResult<(FramedIo, Endpoint)>) -> T + Send + 'static,
) -> ZmqResult<(Endpoint, AcceptStopHandle)>
where
T: std::future::Future<Output = ()> + Send + 'static,
{
let _cback = cback;
match endpoint {
Endpoint::Tcp(_host, _port) => do_if_enabled!(
"tcp-transport",
tcp::begin_accept(_host, _port, _cback).await
),
Endpoint::Ipc(_path) => do_if_enabled!(
"ipc-transport",
if let Some(path) = _path {
ipc::begin_accept(&path, _cback).await
} else {
Err(crate::error::ZmqError::Socket(
"Cannot begin accepting peers at an unnamed ipc socket",
))
}
),
}
}
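// Splits a runtime-specific stream into read/write halves and wraps them in the
// runtime-agnostic FramedIo (using a futures-compat shim under tokio).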
#[allow(unused)]
#[cfg(feature = "tokio-runtime")]
fn make_framed<T>(stream: T) -> FramedIo
where
T: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + 'static,
{
use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
let (read, write) = tokio::io::split(stream);
FramedIo::new(Box::new(read.compat()), Box::new(write.compat_write()))
}
#[allow(unused)]
#[cfg(feature = "async-std-runtime")]
fn make_framed<T>(stream: T) -> FramedIo
where
T: futures::AsyncRead + futures::AsyncWrite + Send + Sync + 'static,
{
use futures::AsyncReadExt;
let (read, write) = stream.split();
FramedIo::new(Box::new(read), Box::new(write))
}
| begin_accept |
smoke_test.go | // +build integration
package wafv2
import (
"context"
"errors"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/wafv2"
"github.com/aws/aws-sdk-go-v2/service/wafv2/types"
"github.com/aws/smithy-go"
"github.com/aws/aws-sdk-go-v2/service/internal/integrationtest"
)
func TestInteg_00_ListWebACLs(t *testing.T) {
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
cfg, _ := integrationtest.LoadConfigWithDefaultRegion("us-east-1")
svc := wafv2.NewFromConfig(cfg)
input := &wafv2.ListWebACLsInput{
Limit: aws.Int32(20),
Scope: types.ScopeRegional,
}
_, err := svc.ListWebACLs(ctx, input)
if err != nil {
t.Errorf("expect no error, got %v", err)
}
}
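// The second smoke test sends an intentionally invalid CreateRegexPatternSet
// request and asserts the error is a smithy.APIError with a non-empty code and message.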
func | (t *testing.T) {
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelFn()
cfg, _ := integrationtest.LoadConfigWithDefaultRegion("us-east-1")
svc := wafv2.NewFromConfig(cfg)
input := &wafv2.CreateRegexPatternSetInput{
Name: aws.String("fake_name"),
Scope: types.ScopeRegional,
}
_, err := svc.CreateRegexPatternSet(ctx, input)
if err == nil {
t.Fatalf("expect request to fail")
}
var apiErr smithy.APIError
if !errors.As(err, &apiErr) {
t.Fatalf("expect error to be API error, was not, %v", err)
}
if len(apiErr.ErrorCode()) == 0 {
t.Errorf("expect non-empty error code")
}
if len(apiErr.ErrorMessage()) == 0 {
t.Errorf("expect non-empty error message")
}
}
| TestInteg_01_CreateRegexPatternSet |
header.rs | mod key;
pub use key::HeaderKey;
mod value;
pub use value::HeaderValue;
/// A single HTTP-Header Pair(Key-Value)
#[derive(Clone, Debug)]
pub struct Header<'a> {
/// The Key part of the Header
pub key: HeaderKey<'a>,
/// The Value associated with the Header
pub value: HeaderValue<'a>,
}
impl PartialEq for Header<'_> {
fn eq(&self, other: &Self) -> bool {
self.key.eq(&other.key)
}
}
impl<'a> Header<'a> {
/// Serializes the Header into the given Buffer
/// by appending the final Data to the End of it
pub fn serialize(&self, buf: &mut Vec<u8>) {
self.key.serialize(buf);
buf.extend_from_slice(": ".as_bytes());
self.value.serialize(buf);
buf.extend_from_slice("\r\n".as_bytes());
}
/// Clones all the associated Data to create a completely
/// new and independent Header instance
pub fn | <'refed, 'owned>(&'refed self) -> Header<'owned> {
Header {
key: self.key.to_owned(),
value: self.value.to_owned(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn compare() {
assert_eq!(
Header {
key: HeaderKey::StrRef("test"),
value: HeaderValue::StrRef("value"),
},
Header {
key: HeaderKey::StrRef("test"),
value: HeaderValue::StrRef("some other value"),
}
);
}
#[test]
fn serialize() {
let header = Header {
key: HeaderKey::StrRef("test-key"),
value: HeaderValue::StrRef("test-value"),
};
let mut buf: Vec<u8> = Vec::new();
header.serialize(&mut buf);
assert_eq!("test-key: test-value\r\n".as_bytes(), &buf);
}
}
| to_owned |
filterNamespace_import_es5.2.minified.js | var ns;
import _class_call_check from "@swc/helpers/lib/_class_call_check.js";
!function(ns1) {
var nested, NestedClass, Class = function() {
"use strict";
_class_call_check(this, Class); | ns1.Class = Class, ns1.Value = "", nested = ns1.nested || (ns1.nested = {}), NestedClass = function() {
"use strict";
_class_call_check(this, NestedClass);
}, nested.NestedClass = NestedClass;
}(ns || (ns = {}));
export default ns;
ns.Class, ns.Value; | }; |
record_test.py | dd = readdata.Dexcom.FindDevice()
dr = readdata.Dexcom(dd)
meter_records = dr.ReadRecords('METER_DATA')
print('First Meter Record = ')
print(meter_records[0])
print('Last Meter Record =')
print(meter_records[-1])
insertion_records = dr.ReadRecords('INSERTION_TIME')
print('First Insertion Record = ')
print(insertion_records[0])
print('Last Insertion Record = ')
print(insertion_records[-1]) | from future import print_function
import readdata
|
|
remove_certificate_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package security_certificates
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
)
// RemoveCertificateReader is a Reader for the RemoveCertificate structure.
type RemoveCertificateReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *RemoveCertificateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 403:
result := NewRemoveCertificateForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil |
return nil, result
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
// NewRemoveCertificateForbidden creates a RemoveCertificateForbidden with default headers values
func NewRemoveCertificateForbidden() *RemoveCertificateForbidden {
return &RemoveCertificateForbidden{}
}
/* RemoveCertificateForbidden describes a response with status code 403, with default header values.
Insufficient permissions to remove certificate from the trust store
*/
type RemoveCertificateForbidden struct {
}
func (o *RemoveCertificateForbidden) Error() string {
return fmt.Sprintf("[DELETE /v1/security/ssl/truststore/{id}][%d] removeCertificateForbidden ", 403)
}
func (o *RemoveCertificateForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
| {
return nil, err
} |
types.rs | #![allow(non_snake_case)]
use crate::hir::def_id::DefId;
use lint::{LateContext, LintArray, LintContext};
use lint::{LateLintPass, LintPass};
use rustc::hir;
use rustc::hir::{is_range_literal, ExprKind, Node};
use rustc::lint;
use rustc::mir::interpret::{sign_extend, truncate};
use rustc::ty::layout::{self, IntegerExt, LayoutOf, SizeSkeleton, VariantIdx};
use rustc::ty::subst::SubstsRef;
use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt};
use rustc_data_structures::fx::FxHashSet;
use rustc_index::vec::Idx;
use rustc_span::source_map;
use rustc_span::symbol::sym;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
use syntax::errors::Applicability;
use syntax::{ast, attr};
use log::debug;
use std::cmp;
use std::{f32, f64, i16, i32, i64, i8, u16, u32, u64, u8};
declare_lint! {
UNUSED_COMPARISONS,
Warn,
"comparisons made useless by limits of the types involved"
}
declare_lint! {
OVERFLOWING_LITERALS,
Deny,
"literal out of range for its type"
}
declare_lint! {
VARIANT_SIZE_DIFFERENCES,
Allow,
"detects enums with widely varying variant sizes"
}
#[derive(Copy, Clone)]
pub struct TypeLimits {
/// Id of the last visited negated expression
negated_expr_id: hir::HirId,
}
impl_lint_pass!(TypeLimits => [UNUSED_COMPARISONS, OVERFLOWING_LITERALS]);
impl TypeLimits {
pub fn new() -> TypeLimits {
TypeLimits { negated_expr_id: hir::DUMMY_HIR_ID }
}
}
/// Attempts to special-case the overflowing literal lint when it occurs as a range endpoint.
/// Returns `true` iff the lint was overridden.
fn lint_overflowing_range_endpoint<'a, 'tcx>(
cx: &LateContext<'a, 'tcx>,
lit: &hir::Lit,
lit_val: u128,
max: u128,
expr: &'tcx hir::Expr<'tcx>,
parent_expr: &'tcx hir::Expr<'tcx>,
ty: &str,
) -> bool {
// We only want to handle exclusive (`..`) ranges,
// which are represented as `ExprKind::Struct`.
if let ExprKind::Struct(_, eps, _) = &parent_expr.kind {
if eps.len() != 2 {
return false;
}
// We can suggest using an inclusive range
// (`..=`) instead only if it is the `end` that is
// overflowing and only by 1.
if eps[1].expr.hir_id == expr.hir_id && lit_val - 1 == max {
let mut err = cx.struct_span_lint(
OVERFLOWING_LITERALS,
parent_expr.span,
&format!("range endpoint is out of range for `{}`", ty),
);
if let Ok(start) = cx.sess().source_map().span_to_snippet(eps[0].span) {
use ast::{LitIntType, LitKind};
// We need to preserve the literal's suffix,
// as it may determine typing information.
let suffix = match lit.node {
LitKind::Int(_, LitIntType::Signed(s)) => format!("{}", s.name_str()),
LitKind::Int(_, LitIntType::Unsigned(s)) => format!("{}", s.name_str()),
LitKind::Int(_, LitIntType::Unsuffixed) => "".to_owned(),
_ => bug!(),
};
let suggestion = format!("{}..={}{}", start, lit_val - 1, suffix);
err.span_suggestion(
parent_expr.span,
&"use an inclusive range instead",
suggestion,
Applicability::MachineApplicable,
);
err.emit();
return true;
}
}
}
false
}
// For `isize` & `usize`, be conservative with the warnings, so that the
// warnings are consistent between 32- and 64-bit platforms.
fn int_ty_range(int_ty: ast::IntTy) -> (i128, i128) {
match int_ty {
ast::IntTy::Isize => (i64::min_value() as i128, i64::max_value() as i128),
ast::IntTy::I8 => (i8::min_value() as i64 as i128, i8::max_value() as i128),
ast::IntTy::I16 => (i16::min_value() as i64 as i128, i16::max_value() as i128),
ast::IntTy::I32 => (i32::min_value() as i64 as i128, i32::max_value() as i128),
ast::IntTy::I64 => (i64::min_value() as i128, i64::max_value() as i128),
ast::IntTy::I128 => (i128::min_value() as i128, i128::max_value()),
}
}
fn uint_ty_range(uint_ty: ast::UintTy) -> (u128, u128) {
match uint_ty {
ast::UintTy::Usize => (u64::min_value() as u128, u64::max_value() as u128),
ast::UintTy::U8 => (u8::min_value() as u128, u8::max_value() as u128),
ast::UintTy::U16 => (u16::min_value() as u128, u16::max_value() as u128),
ast::UintTy::U32 => (u32::min_value() as u128, u32::max_value() as u128),
ast::UintTy::U64 => (u64::min_value() as u128, u64::max_value() as u128),
ast::UintTy::U128 => (u128::min_value(), u128::max_value()),
}
}
fn get_bin_hex_repr(cx: &LateContext<'_, '_>, lit: &hir::Lit) -> Option<String> {
let src = cx.sess().source_map().span_to_snippet(lit.span).ok()?;
let firstch = src.chars().next()?;
if firstch == '0' {
match src.chars().nth(1) {
Some('x') | Some('b') => return Some(src),
_ => return None,
}
}
None
}
fn report_bin_hex_error(
cx: &LateContext<'_, '_>,
expr: &hir::Expr<'_>,
ty: attr::IntType,
repr_str: String,
val: u128,
negative: bool,
) {
let size = layout::Integer::from_attr(&cx.tcx, ty).size();
let (t, actually) = match ty {
attr::IntType::SignedInt(t) => {
let actually = sign_extend(val, size) as i128;
(t.name_str(), actually.to_string())
}
attr::IntType::UnsignedInt(t) => {
let actually = truncate(val, size);
(t.name_str(), actually.to_string())
}
};
let mut err = cx.struct_span_lint(
OVERFLOWING_LITERALS,
expr.span,
&format!("literal out of range for {}", t),
);
err.note(&format!(
"the literal `{}` (decimal `{}`) does not fit into \
an `{}` and will become `{}{}`",
repr_str, val, t, actually, t
));
if let Some(sugg_ty) = get_type_suggestion(&cx.tables.node_type(expr.hir_id), val, negative) {
if let Some(pos) = repr_str.chars().position(|c| c == 'i' || c == 'u') {
let (sans_suffix, _) = repr_str.split_at(pos);
err.span_suggestion(
expr.span,
&format!("consider using `{}` instead", sugg_ty),
format!("{}{}", sans_suffix, sugg_ty),
Applicability::MachineApplicable,
);
} else {
err.help(&format!("consider using `{}` instead", sugg_ty));
}
}
err.emit();
}
// This function finds the next fitting type and generates a suggestion string.
// It searches for fitting types in the following way (`X < Y`):
// - `iX`: if literal fits in `uX` => `uX`, else => `iY`
// - `-iX` => `iY`
// - `uX` => `uY`
//
// No suggestion for: `isize`, `usize`.
fn get_type_suggestion(t: Ty<'_>, val: u128, negative: bool) -> Option<&'static str> {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
macro_rules! find_fit {
($ty:expr, $val:expr, $negative:expr,
$($type:ident => [$($utypes:expr),*] => [$($itypes:expr),*]),+) => {
{
let _neg = if negative { 1 } else { 0 };
match $ty {
$($type => {
$(if !negative && val <= uint_ty_range($utypes).1 {
return Some($utypes.name_str())
})*
$(if val <= int_ty_range($itypes).1 as u128 + _neg {
return Some($itypes.name_str())
})*
None
},)+
_ => None
}
}
}
}
match t.kind {
ty::Int(i) => find_fit!(i, val, negative,
I8 => [U8] => [I16, I32, I64, I128],
I16 => [U16] => [I32, I64, I128],
I32 => [U32] => [I64, I128],
I64 => [U64] => [I128],
I128 => [U128] => []),
ty::Uint(u) => find_fit!(u, val, negative,
U8 => [U8, U16, U32, U64, U128] => [],
U16 => [U16, U32, U64, U128] => [],
U32 => [U32, U64, U128] => [],
U64 => [U64, U128] => [],
U128 => [U128] => []),
_ => None,
}
}
fn lint_int_literal<'a, 'tcx>(
cx: &LateContext<'a, 'tcx>,
type_limits: &TypeLimits,
e: &'tcx hir::Expr<'tcx>,
lit: &hir::Lit,
t: ast::IntTy,
v: u128,
) {
let int_type = t.normalize(cx.sess().target.ptr_width);
let (_, max) = int_ty_range(int_type);
let max = max as u128;
let negative = type_limits.negated_expr_id == e.hir_id;
// Detect literal value out of range [min, max] inclusive
// avoiding use of -min to prevent overflow/panic
if (negative && v > max + 1) || (!negative && v > max) {
if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
report_bin_hex_error(cx, e, attr::IntType::SignedInt(t), repr_str, v, negative);
return;
}
let par_id = cx.tcx.hir().get_parent_node(e.hir_id);
if let Node::Expr(par_e) = cx.tcx.hir().get(par_id) {
if let hir::ExprKind::Struct(..) = par_e.kind {
if is_range_literal(cx.sess().source_map(), par_e)
&& lint_overflowing_range_endpoint(cx, lit, v, max, e, par_e, t.name_str())
{
// The overflowing literal lint was overridden.
return;
}
}
}
cx.span_lint(
OVERFLOWING_LITERALS,
e.span,
&format!("literal out of range for `{}`", t.name_str()),
);
}
}
fn lint_uint_literal<'a, 'tcx>(
cx: &LateContext<'a, 'tcx>,
e: &'tcx hir::Expr<'tcx>,
lit: &hir::Lit,
t: ast::UintTy,
) {
let uint_type = t.normalize(cx.sess().target.ptr_width);
let (min, max) = uint_ty_range(uint_type);
let lit_val: u128 = match lit.node {
// _v is u8, within range by definition
ast::LitKind::Byte(_v) => return,
ast::LitKind::Int(v, _) => v,
_ => bug!(),
};
if lit_val < min || lit_val > max {
let parent_id = cx.tcx.hir().get_parent_node(e.hir_id);
if let Node::Expr(par_e) = cx.tcx.hir().get(parent_id) {
match par_e.kind {
hir::ExprKind::Cast(..) => {
if let ty::Char = cx.tables.expr_ty(par_e).kind {
let mut err = cx.struct_span_lint(
OVERFLOWING_LITERALS,
par_e.span,
"only `u8` can be cast into `char`",
);
err.span_suggestion(
par_e.span,
&"use a `char` literal instead",
format!("'\\u{{{:X}}}'", lit_val),
Applicability::MachineApplicable,
);
err.emit();
return;
}
}
hir::ExprKind::Struct(..) if is_range_literal(cx.sess().source_map(), par_e) => {
let t = t.name_str();
if lint_overflowing_range_endpoint(cx, lit, lit_val, max, e, par_e, t) {
// The overflowing literal lint was overridden.
return;
}
}
_ => {}
}
}
if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
report_bin_hex_error(cx, e, attr::IntType::UnsignedInt(t), repr_str, lit_val, false);
return;
}
cx.span_lint(
OVERFLOWING_LITERALS,
e.span,
&format!("literal out of range for `{}`", t.name_str()),
);
}
}
fn lint_literal<'a, 'tcx>(
cx: &LateContext<'a, 'tcx>,
type_limits: &TypeLimits,
e: &'tcx hir::Expr<'tcx>,
lit: &hir::Lit,
) {
match cx.tables.node_type(e.hir_id).kind {
ty::Int(t) => {
match lit.node {
ast::LitKind::Int(v, ast::LitIntType::Signed(_))
| ast::LitKind::Int(v, ast::LitIntType::Unsuffixed) => {
lint_int_literal(cx, type_limits, e, lit, t, v)
}
_ => bug!(),
};
}
ty::Uint(t) => lint_uint_literal(cx, e, lit, t),
ty::Float(t) => {
let is_infinite = match lit.node {
ast::LitKind::Float(v, _) => match t {
ast::FloatTy::F32 => v.as_str().parse().map(f32::is_infinite),
ast::FloatTy::F64 => v.as_str().parse().map(f64::is_infinite),
},
_ => bug!(),
};
if is_infinite == Ok(true) {
cx.span_lint(
OVERFLOWING_LITERALS,
e.span,
&format!("literal out of range for `{}`", t.name_str()),
);
}
}
_ => {}
}
}
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits {
fn check_expr(&mut self, cx: &LateContext<'a, 'tcx>, e: &'tcx hir::Expr<'tcx>) {
match e.kind {
hir::ExprKind::Unary(hir::UnNeg, ref expr) => {
// propagate negation, if the negation itself isn't negated
if self.negated_expr_id != e.hir_id {
self.negated_expr_id = expr.hir_id;
}
}
hir::ExprKind::Binary(binop, ref l, ref r) => {
if is_comparison(binop) && !check_limits(cx, binop, &l, &r) {
cx.span_lint(
UNUSED_COMPARISONS,
e.span,
"comparison is useless due to type limits",
);
}
}
hir::ExprKind::Lit(ref lit) => lint_literal(cx, self, e, lit),
_ => {}
};
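// A comparison against a literal is useful only if the literal leaves room,
// within the type's [min, max] range, for the comparison to be true or false.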
fn is_valid<T: cmp::PartialOrd>(binop: hir::BinOp, v: T, min: T, max: T) -> bool {
match binop.node {
hir::BinOpKind::Lt => v > min && v <= max,
hir::BinOpKind::Le => v >= min && v < max,
hir::BinOpKind::Gt => v >= min && v < max,
hir::BinOpKind::Ge => v > min && v <= max,
hir::BinOpKind::Eq | hir::BinOpKind::Ne => v >= min && v <= max,
_ => bug!(),
}
}
fn rev_binop(binop: hir::BinOp) -> hir::BinOp {
source_map::respan(
binop.span,
match binop.node {
hir::BinOpKind::Lt => hir::BinOpKind::Gt,
hir::BinOpKind::Le => hir::BinOpKind::Ge,
hir::BinOpKind::Gt => hir::BinOpKind::Lt,
hir::BinOpKind::Ge => hir::BinOpKind::Le,
_ => return binop,
},
)
}
fn check_limits(
cx: &LateContext<'_, '_>,
binop: hir::BinOp,
l: &hir::Expr<'_>,
r: &hir::Expr<'_>,
) -> bool {
let (lit, expr, swap) = match (&l.kind, &r.kind) {
(&hir::ExprKind::Lit(_), _) => (l, r, true),
(_, &hir::ExprKind::Lit(_)) => (r, l, false),
_ => return true,
};
// Normalize the binop so that the literal is always on the RHS in
// the comparison
let norm_binop = if swap { rev_binop(binop) } else { binop };
match cx.tables.node_type(expr.hir_id).kind {
ty::Int(int_ty) => {
let (min, max) = int_ty_range(int_ty);
let lit_val: i128 = match lit.kind {
hir::ExprKind::Lit(ref li) => match li.node {
ast::LitKind::Int(v, ast::LitIntType::Signed(_))
| ast::LitKind::Int(v, ast::LitIntType::Unsuffixed) => v as i128,
_ => return true,
},
_ => bug!(),
};
is_valid(norm_binop, lit_val, min, max)
}
ty::Uint(uint_ty) => {
let (min, max): (u128, u128) = uint_ty_range(uint_ty);
let lit_val: u128 = match lit.kind {
hir::ExprKind::Lit(ref li) => match li.node {
ast::LitKind::Int(v, _) => v,
_ => return true,
},
_ => bug!(),
};
is_valid(norm_binop, lit_val, min, max)
}
_ => true,
}
}
fn is_comparison(binop: hir::BinOp) -> bool {
match binop.node {
hir::BinOpKind::Eq
| hir::BinOpKind::Lt
| hir::BinOpKind::Le
| hir::BinOpKind::Ne
| hir::BinOpKind::Ge
| hir::BinOpKind::Gt => true,
_ => false,
}
}
}
}
declare_lint! {
IMPROPER_CTYPES,
Warn,
"proper use of libc types in foreign modules"
}
declare_lint_pass!(ImproperCTypes => [IMPROPER_CTYPES]);
struct ImproperCTypesVisitor<'a, 'tcx> {
cx: &'a LateContext<'a, 'tcx>,
}
enum FfiResult<'tcx> {
FfiSafe,
FfiPhantom(Ty<'tcx>),
FfiUnsafe { ty: Ty<'tcx>, reason: &'static str, help: Option<&'static str> },
}
fn is_zst<'tcx>(tcx: TyCtxt<'tcx>, did: DefId, ty: Ty<'tcx>) -> bool {
tcx.layout_of(tcx.param_env(did).and(ty)).map(|layout| layout.is_zst()).unwrap_or(false)
}
fn ty_is_known_nonnull<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
match ty.kind {
ty::FnPtr(_) => true,
ty::Ref(..) => true,
ty::Adt(field_def, substs) if field_def.repr.transparent() && !field_def.is_union() => {
for field in field_def.all_fields() {
let field_ty =
tcx.normalize_erasing_regions(ParamEnv::reveal_all(), field.ty(tcx, substs));
if is_zst(tcx, field.did, field_ty) {
continue;
}
let attrs = tcx.get_attrs(field_def.did);
if attrs.iter().any(|a| a.check_name(sym::rustc_nonnull_optimization_guaranteed))
|| ty_is_known_nonnull(tcx, field_ty)
{
return true;
}
}
false
}
_ => false,
}
}
/// Check if this enum can be safely exported based on the
/// "nullable pointer optimization". Currently restricted
/// to function pointers, references, core::num::NonZero*,
/// core::ptr::NonNull, and #[repr(transparent)] newtypes.
/// FIXME: This duplicates code in codegen.
fn is_repr_nullable_ptr<'tcx>(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
ty_def: &'tcx ty::AdtDef,
substs: SubstsRef<'tcx>,
) -> bool {
if ty_def.variants.len() != 2 {
return false;
}
let get_variant_fields = |index| &ty_def.variants[VariantIdx::new(index)].fields;
let variant_fields = [get_variant_fields(0), get_variant_fields(1)];
let fields = if variant_fields[0].is_empty() {
&variant_fields[1]
} else if variant_fields[1].is_empty() {
&variant_fields[0]
} else {
return false;
};
if fields.len() != 1 {
return false;
}
let field_ty = fields[0].ty(tcx, substs);
if !ty_is_known_nonnull(tcx, field_ty) {
return false;
}
// At this point, the field's type is known to be nonnull and the parent enum is Option-like.
// If the computed size for the field and the enum are different, the nonnull optimization isn't
// being applied (and we've got a problem somewhere).
let compute_size_skeleton = |t| SizeSkeleton::compute(t, tcx, ParamEnv::reveal_all()).unwrap();
if !compute_size_skeleton(ty).same_size(compute_size_skeleton(field_ty)) {
bug!("improper_ctypes: Option nonnull optimization not applied?");
}
true
}
impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
/// Check if the type is array and emit an unsafe type lint.
fn check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
if let ty::Array(..) = ty.kind {
self.emit_ffi_unsafe_type_lint(
ty,
sp,
"passing raw arrays by value is not FFI-safe",
Some("consider passing a pointer to the array"),
);
true
} else {
false
}
}
/// Checks if the given type is "ffi-safe" (has a stable, well-defined
/// representation which can be exported to C code).
fn check_type_for_ffi(&self, cache: &mut FxHashSet<Ty<'tcx>>, ty: Ty<'tcx>) -> FfiResult<'tcx> {
use FfiResult::*;
let cx = self.cx.tcx;
// Protect against infinite recursion, for example
// `struct S(*mut S);`.
// FIXME: A recursion limit is necessary as well, for irregular
// recursive types.
if !cache.insert(ty) {
return FfiSafe;
}
match ty.kind {
ty::Adt(def, substs) => {
if def.is_phantom_data() {
return FfiPhantom(ty);
}
match def.adt_kind() {
AdtKind::Struct => {
if !def.repr.c() && !def.repr.transparent() {
return FfiUnsafe {
ty,
reason: "this struct has unspecified layout",
help: Some(
"consider adding a `#[repr(C)]` or \
`#[repr(transparent)]` attribute to this struct",
),
};
}
let is_non_exhaustive =
def.non_enum_variant().is_field_list_non_exhaustive();
if is_non_exhaustive && !def.did.is_local() {
return FfiUnsafe {
ty,
reason: "this struct is non-exhaustive",
help: None,
};
}
if def.non_enum_variant().fields.is_empty() {
return FfiUnsafe {
ty,
reason: "this struct has no fields",
help: Some("consider adding a member to this struct"),
};
}
// We can't completely trust repr(C) and repr(transparent) markings;
// make sure the fields are actually safe.
let mut all_phantom = true;
for field in &def.non_enum_variant().fields {
let field_ty = cx.normalize_erasing_regions(
ParamEnv::reveal_all(),
field.ty(cx, substs),
);
// repr(transparent) types are allowed to have arbitrary ZSTs, not just
// PhantomData -- skip checking all ZST fields
if def.repr.transparent() && is_zst(cx, field.did, field_ty) {
continue;
}
let r = self.check_type_for_ffi(cache, field_ty);
match r {
FfiSafe => {
all_phantom = false;
}
FfiPhantom(..) => {}
FfiUnsafe { .. } => {
return r;
}
}
}
if all_phantom { FfiPhantom(ty) } else { FfiSafe }
}
AdtKind::Union => {
if !def.repr.c() && !def.repr.transparent() {
return FfiUnsafe {
ty,
reason: "this union has unspecified layout",
help: Some(
"consider adding a `#[repr(C)]` or \
`#[repr(transparent)]` attribute to this union",
),
};
}
if def.non_enum_variant().fields.is_empty() {
return FfiUnsafe {
ty,
reason: "this union has no fields",
help: Some("consider adding a field to this union"),
};
}
let mut all_phantom = true;
for field in &def.non_enum_variant().fields {
let field_ty = cx.normalize_erasing_regions(
ParamEnv::reveal_all(),
field.ty(cx, substs),
);
// repr(transparent) types are allowed to have arbitrary ZSTs, not just
// PhantomData -- skip checking all ZST fields.
if def.repr.transparent() && is_zst(cx, field.did, field_ty) {
continue;
}
let r = self.check_type_for_ffi(cache, field_ty);
match r {
FfiSafe => {
all_phantom = false;
}
FfiPhantom(..) => {}
FfiUnsafe { .. } => {
return r;
}
}
}
if all_phantom { FfiPhantom(ty) } else { FfiSafe }
}
AdtKind::Enum => {
if def.variants.is_empty() {
// Empty enums are okay... although sort of useless.
return FfiSafe;
}
// Check for a repr() attribute to specify the size of the
// discriminant.
if !def.repr.c() && !def.repr.transparent() && def.repr.int.is_none() {
// Special-case types like `Option<extern fn()>`.
if !is_repr_nullable_ptr(cx, ty, def, substs) {
return FfiUnsafe {
ty,
reason: "enum has no representation hint",
help: Some(
"consider adding a `#[repr(C)]`, \
`#[repr(transparent)]`, or integer `#[repr(...)]` \
attribute to this enum",
),
};
}
}
if def.is_variant_list_non_exhaustive() && !def.did.is_local() {
return FfiUnsafe {
ty,
reason: "this enum is non-exhaustive",
help: None,
};
}
// Check the contained variants.
for variant in &def.variants {
let is_non_exhaustive = variant.is_field_list_non_exhaustive();
if is_non_exhaustive && !variant.def_id.is_local() {
return FfiUnsafe {
ty,
reason: "this enum has non-exhaustive variants",
help: None,
};
}
for field in &variant.fields {
let field_ty = cx.normalize_erasing_regions(
ParamEnv::reveal_all(),
field.ty(cx, substs),
);
// repr(transparent) types are allowed to have arbitrary ZSTs, not
// just PhantomData -- skip checking all ZST fields.
if def.repr.transparent() && is_zst(cx, field.did, field_ty) {
continue;
}
let r = self.check_type_for_ffi(cache, field_ty);
match r {
FfiSafe => {}
FfiUnsafe { .. } => {
return r;
}
FfiPhantom(..) => {
return FfiUnsafe {
ty,
reason: "this enum contains a PhantomData field",
help: None,
};
}
}
}
}
FfiSafe
}
}
}
ty::Char => FfiUnsafe {
ty,
reason: "the `char` type has no C equivalent",
help: Some("consider using `u32` or `libc::wchar_t` instead"),
},
ty::Int(ast::IntTy::I128) | ty::Uint(ast::UintTy::U128) => FfiUnsafe {
ty,
reason: "128-bit integers don't currently have a known stable ABI",
help: None,
},
// Primitive types with a stable representation.
ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe,
ty::Slice(_) => FfiUnsafe {
ty,
reason: "slices have no C equivalent",
help: Some("consider using a raw pointer instead"),
},
ty::Dynamic(..) => {
FfiUnsafe { ty, reason: "trait objects have no C equivalent", help: None }
}
ty::Str => FfiUnsafe {
ty,
reason: "string slices have no C equivalent",
help: Some("consider using `*const u8` and a length instead"),
},
ty::Tuple(..) => FfiUnsafe {
ty,
reason: "tuples have unspecified layout",
help: Some("consider using a struct instead"),
},
ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _) => {
self.check_type_for_ffi(cache, ty)
}
ty::Array(inner_ty, _) => self.check_type_for_ffi(cache, inner_ty),
ty::FnPtr(sig) => {
match sig.abi() {
Abi::Rust | Abi::RustIntrinsic | Abi::PlatformIntrinsic | Abi::RustCall => {
return FfiUnsafe {
ty,
reason: "this function pointer has Rust-specific calling convention",
help: Some(
"consider using an `extern fn(...) -> ...` \
function pointer instead",
),
};
}
_ => {}
}
let sig = cx.erase_late_bound_regions(&sig);
if !sig.output().is_unit() {
let r = self.check_type_for_ffi(cache, sig.output());
match r {
FfiSafe => {}
_ => {
return r;
}
}
}
for arg in sig.inputs() {
let r = self.check_type_for_ffi(cache, arg);
match r {
FfiSafe => {}
_ => {
return r;
}
}
}
FfiSafe
}
ty::Foreign(..) => FfiSafe,
ty::Param(..)
| ty::Infer(..)
| ty::Bound(..)
| ty::Error
| ty::Closure(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
| ty::Placeholder(..)
| ty::UnnormalizedProjection(..)
| ty::Projection(..)
| ty::Opaque(..)
| ty::FnDef(..) => bug!("unexpected type in foreign function: {:?}", ty),
}
}
fn emit_ffi_unsafe_type_lint(
&mut self,
ty: Ty<'tcx>,
sp: Span,
note: &str,
help: Option<&str>,
) {
let mut diag = self.cx.struct_span_lint(
IMPROPER_CTYPES,
sp,
&format!("`extern` block uses type `{}`, which is not FFI-safe", ty),
);
diag.span_label(sp, "not FFI-safe");
if let Some(help) = help {
diag.help(help);
}
diag.note(note);
if let ty::Adt(def, _) = ty.kind {
if let Some(sp) = self.cx.tcx.hir().span_if_local(def.did) {
diag.span_note(sp, "type defined here");
}
}
diag.emit();
}
fn check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
use crate::rustc::ty::TypeFoldable;
struct ProhibitOpaqueTypes<'tcx> {
ty: Option<Ty<'tcx>>,
};
impl<'tcx> ty::fold::TypeVisitor<'tcx> for ProhibitOpaqueTypes<'tcx> {
fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool {
if let ty::Opaque(..) = ty.kind {
self.ty = Some(ty);
true
} else {
ty.super_visit_with(self)
}
}
}
let mut visitor = ProhibitOpaqueTypes { ty: None };
ty.visit_with(&mut visitor);
if let Some(ty) = visitor.ty {
self.emit_ffi_unsafe_type_lint(ty, sp, "opaque types have no C equivalent", None);
true
} else {
false
}
}
fn check_type_for_ffi_and_report_errors(&mut self, sp: Span, ty: Ty<'tcx>, is_static: bool) {
// We have to check for opaque types before `normalize_erasing_regions`,
// which will replace opaque types with their underlying concrete type.
if self.check_for_opaque_ty(sp, ty) {
// We've already emitted an error due to an opaque type.
return;
}
// it is only OK to use this function because extern fns cannot have
// any generic types right now:
let ty = self.cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
// C doesn't really support passing arrays by value.
// The only way to pass an array by value is through a struct.
// So we first test that the top level isn't an array,
// and then recursively check the types inside.
if !is_static && self.check_for_array_ty(sp, ty) {
return;
}
match self.check_type_for_ffi(&mut FxHashSet::default(), ty) {
FfiResult::FfiSafe => {}
FfiResult::FfiPhantom(ty) => {
self.emit_ffi_unsafe_type_lint(ty, sp, "composed only of `PhantomData`", None);
}
FfiResult::FfiUnsafe { ty, reason, help } => {
self.emit_ffi_unsafe_type_lint(ty, sp, reason, help);
}
}
}
fn check_foreign_fn(&mut self, id: hir::HirId, decl: &hir::FnDecl<'_>) |
fn check_foreign_static(&mut self, id: hir::HirId, span: Span) {
let def_id = self.cx.tcx.hir().local_def_id(id);
let ty = self.cx.tcx.type_of(def_id);
self.check_type_for_ffi_and_report_errors(span, ty, true);
}
}
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ImproperCTypes {
fn check_foreign_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::ForeignItem<'_>) {
let mut vis = ImproperCTypesVisitor { cx };
let abi = cx.tcx.hir().get_foreign_abi(it.hir_id);
if let Abi::Rust | Abi::RustCall | Abi::RustIntrinsic | Abi::PlatformIntrinsic = abi {
// Don't worry about types in internal ABIs.
} else {
match it.kind {
hir::ForeignItemKind::Fn(ref decl, _, _) => {
vis.check_foreign_fn(it.hir_id, decl);
}
hir::ForeignItemKind::Static(ref ty, _) => {
vis.check_foreign_static(it.hir_id, ty.span);
}
hir::ForeignItemKind::Type => (),
}
}
}
}
declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]);
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences {
fn check_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::Item<'_>) {
if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
let item_def_id = cx.tcx.hir().local_def_id(it.hir_id);
let t = cx.tcx.type_of(item_def_id);
let ty = cx.tcx.erase_regions(&t);
let layout = match cx.layout_of(ty) {
Ok(layout) => layout,
Err(ty::layout::LayoutError::Unknown(_)) => return,
Err(err @ ty::layout::LayoutError::SizeOverflow(_)) => {
bug!("failed to get layout for `{}`: {}", t, err);
}
};
let (variants, tag) = match layout.variants {
layout::Variants::Multiple {
discr_kind: layout::DiscriminantKind::Tag,
ref discr,
ref variants,
..
} => (variants, discr),
_ => return,
};
let discr_size = tag.value.size(&cx.tcx).bytes();
debug!(
"enum `{}` is {} bytes large with layout:\n{:#?}",
t,
layout.size.bytes(),
layout
);
let (largest, slargest, largest_index) = enum_definition
.variants
.iter()
.zip(variants)
.map(|(variant, variant_layout)| {
// Subtract the size of the enum discriminant.
let bytes = variant_layout.size.bytes().saturating_sub(discr_size);
debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
bytes
})
.enumerate()
.fold((0, 0, 0), |(l, s, li), (idx, size)| {
if size > l {
(size, l, idx)
} else if size > s {
(l, size, li)
} else {
(l, s, li)
}
});
// We only warn if the largest variant is at least thrice as large as
// the second-largest.
if largest > slargest * 3 && slargest > 0 {
cx.span_lint(
VARIANT_SIZE_DIFFERENCES,
enum_definition.variants[largest_index].span,
&format!(
"enum variant is more than three times \
larger ({} bytes) than the next largest",
largest
),
);
}
}
}
}
| {
let def_id = self.cx.tcx.hir().local_def_id(id);
let sig = self.cx.tcx.fn_sig(def_id);
let sig = self.cx.tcx.erase_late_bound_regions(&sig);
for (input_ty, input_hir) in sig.inputs().iter().zip(decl.inputs) {
self.check_type_for_ffi_and_report_errors(input_hir.span, input_ty, false);
}
if let hir::Return(ref ret_hir) = decl.output {
let ret_ty = sig.output();
if !ret_ty.is_unit() {
self.check_type_for_ffi_and_report_errors(ret_hir.span, ret_ty, false);
}
}
} |
header.tsx | import { Link } from "gatsby"
import PropTypes from "prop-types"
import React from "react"
import styled from "styled-components"
interface IHeaderProps {
siteTitle: string;
}
const Header = ({ siteTitle }: IHeaderProps) => {
console.log(siteTitle)
return (
<HeaderWrapper>
<HeaderContainer>
<HeaderTitle>
<HeaderLink to="/">
{siteTitle}
</HeaderLink>
</HeaderTitle>
</HeaderContainer>
</HeaderWrapper>
)
}
const HeaderWrapper = styled.header`
background: rebeccapurple;
margin-bottom: 1.45rem;
`
const HeaderContainer = styled.div`
margin: 0 auto;
max-width: 960px;
padding: 1.45rem 1.0875rem;
`
const HeaderTitle = styled.h1`
margin: 0;
`
const HeaderLink = styled(Link)`
color: white;
text-decoration: none;
`
Header.propTypes = {
siteTitle: PropTypes.string,
}
Header.defaultProps = {
siteTitle: ``,
}
|
export default Header |
|
cabi_powerpc64.rs | // Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: The PowerPC64 ABI needs to zero or sign extend function
// call parameters, but compute_abi_info() is passed LLVM types
// which have no sign information.
//
// Alignment of 128-bit types is not currently handled; this will
// need to be fixed when PowerPC vector support is added.
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Attribute};
use trans::cabi::{FnType, ArgType};
use trans::context::CrateContext;
use trans::type_::Type;
use std::cmp;
fn align_up_to(off: usize, a: usize) -> usize {
return (off + a - 1) / a * a;
}
fn align(off: usize, ty: Type) -> usize {
let a = ty_align(ty);
return align_up_to(off, a);
}
fn ty_align(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => panic!("ty_align: unhandled type")
}
}
fn ty_size(ty: Type) -> usize {
match ty.kind() {
Integer => ((ty.int_width() as usize) + 7) / 8,
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
let str_tys = ty.field_types(); | align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => panic!("ty_size: unhandled type")
}
}
fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
fn check_array(ty: Type) -> Option<(Type, u64)> {
let len = ty.array_length() as u64;
if len == 0 {
return None
}
let elt = ty.element_type();
// if our element is an HFA/HVA, so are we; multiply members by our len
is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members))
}
fn check_struct(ty: Type) -> Option<(Type, u64)> {
let str_tys = ty.field_types();
if str_tys.len() == 0 {
return None
}
let mut prev_base_ty = None;
let mut members = 0;
for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) {
match (prev_base_ty, opt_homog_agg) {
// field isn't itself an HFA, so we aren't either
(_, None) => return None,
// first field - store its type and number of members
(None, Some((field_ty, field_members))) => {
prev_base_ty = Some(field_ty);
members = field_members;
},
// 2nd or later field - give up if it's a different type; otherwise incr. members
(Some(prev_ty), Some((field_ty, field_members))) => {
if prev_ty != field_ty {
return None;
}
members += field_members;
}
}
}
// Because of previous checks, we know prev_base_ty is Some(...) because
// 1. str_tys has at least one element; and
// 2. prev_base_ty was filled in (or we would've returned early)
let (base_ty, members) = (prev_base_ty.unwrap(), members);
// Ensure there is no padding.
if ty_size(ty) == ty_size(base_ty) * (members as usize) {
Some((base_ty, members))
} else {
None
}
}
let homog_agg = match ty.kind() {
Float => Some((ty, 1)),
Double => Some((ty, 1)),
Array => check_array(ty),
Struct => check_struct(ty),
_ => None
};
// Ensure we have at most eight uniquely addressable members
homog_agg.and_then(|(base_ty, members)| {
if members > 0 && members <= 8 {
Some((base_ty, members))
} else {
None
}
})
}
fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
return ArgType::direct(ty, None, None, attr);
}
// The PowerPC64 big endian ABI doesn't return aggregates in registers
if ccx.sess().target.target.target_endian == "big" {
return ArgType::indirect(ty, Some(Attribute::StructRet))
}
if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) {
let llty = Type::array(&base_ty, members);
return ArgType::direct(ty, Some(llty), None, None);
}
let size = ty_size(ty);
if size <= 16 {
let llty = if size <= 1 {
Type::i8(ccx)
} else if size <= 2 {
Type::i16(ccx)
} else if size <= 4 {
Type::i32(ccx)
} else if size <= 8 {
Type::i64(ccx)
} else {
Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
};
return ArgType::direct(ty, Some(llty), None, None);
}
ArgType::indirect(ty, Some(Attribute::StructRet))
}
fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
return ArgType::direct(ty, None, None, attr);
}
if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) {
let llty = Type::array(&base_ty, members);
return ArgType::direct(ty, Some(llty), None, None);
}
ArgType::direct(
ty,
Some(struct_ty(ccx, ty)),
None,
None
)
}
fn is_reg_ty(ty: Type) -> bool {
match ty.kind() {
Integer
| Pointer
| Float
| Double => true,
_ => false
}
}
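// Splits a size in bits into as many i64 chunks as fit, plus one iN for any
// remaining bits.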
fn coerce_to_long(ccx: &CrateContext, size: usize) -> Vec<Type> {
let long_ty = Type::i64(ccx);
let mut args = Vec::new();
let mut n = size / 64;
while n > 0 {
args.push(long_ty);
n -= 1;
}
let r = size % 64;
if r > 0 {
args.push(Type::ix(ccx, r as u64));
}
args
}
fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
let size = ty_size(ty) * 8;
Type::struct_(ccx, &coerce_to_long(ccx, size), false)
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let ret_ty = if ret_def {
classify_ret_ty(ccx, rty)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
let mut arg_tys = Vec::new();
for &aty in atys {
let ty = classify_arg_ty(ccx, aty);
arg_tys.push(ty);
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} | str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let str_tys = ty.field_types();
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t)); |
server.go | package main
import (
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
)
type ServerOptions struct {
Port int
Burst int
Concurrency int
HTTPCacheTTL int
HTTPReadTimeout int
HTTPWriteTimeout int
MaxAllowedSize int
CORS bool
Gzip bool // deprecated
AuthForwarding bool
EnableURLSource bool
EnableAWSS3Source bool
EnablePlaceholder bool
EnableURLSignature bool
URLSignatureKey string
Address string
PathPrefix string
APIKey string
Mount string
AWSConfigPath string
CertFile string
KeyFile string
Authorization string
Placeholder string
ForwardHeaders []string
PlaceholderImage []byte
Endpoints Endpoints
AllowedOrigins []*url.URL
}
// Endpoints represents a list of endpoint names to disable.
type Endpoints []string
// IsValid validates if a given HTTP request endpoint is valid or not.
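// The final segment of the request path is compared against the disabled endpoint names.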
func (e Endpoints) IsValid(r *http.Request) bool {
parts := strings.Split(r.URL.Path, "/")
endpoint := parts[len(parts)-1]
for _, name := range e {
if endpoint == name {
return false
}
}
return true
}
func Server(o ServerOptions) error {
addr := o.Address + ":" + strconv.Itoa(o.Port)
handler := NewLog(NewServerMux(o), os.Stdout)
server := &http.Server{
Addr: addr,
Handler: handler,
MaxHeaderBytes: 1 << 20,
ReadTimeout: time.Duration(o.HTTPReadTimeout) * time.Second,
WriteTimeout: time.Duration(o.HTTPWriteTimeout) * time.Second,
}
return listenAndServe(server, o)
}
func listenAndServe(s *http.Server, o ServerOptions) error |
func join(o ServerOptions, route string) string {
return path.Join(o.PathPrefix, route)
}
// NewServerMux creates a new HTTP server route multiplexer.
func NewServerMux(o ServerOptions) http.Handler {
mux := http.NewServeMux()
mux.Handle(join(o, "/"), Middleware(indexController, o))
mux.Handle(join(o, "/form"), Middleware(formController, o))
mux.Handle(join(o, "/health"), Middleware(healthController, o))
image := ImageMiddleware(o)
mux.Handle(join(o, "/resize"), image(Resize))
mux.Handle(join(o, "/fit"), image(Fit))
mux.Handle(join(o, "/enlarge"), image(Enlarge))
mux.Handle(join(o, "/extract"), image(Extract))
mux.Handle(join(o, "/crop"), image(Crop))
mux.Handle(join(o, "/smartcrop"), image(SmartCrop))
mux.Handle(join(o, "/rotate"), image(Rotate))
mux.Handle(join(o, "/flip"), image(Flip))
mux.Handle(join(o, "/flop"), image(Flop))
mux.Handle(join(o, "/thumbnail"), image(Thumbnail))
mux.Handle(join(o, "/zoom"), image(Zoom))
mux.Handle(join(o, "/convert"), image(Convert))
mux.Handle(join(o, "/watermark"), image(Watermark))
mux.Handle(join(o, "/watermarkimage"), image(WatermarkImage))
mux.Handle(join(o, "/info"), image(Info))
mux.Handle(join(o, "/blur"), image(GaussianBlur))
mux.Handle(join(o, "/pipeline"), image(Pipeline))
mux.Handle(join(o, "/v2pipeline"), image(Pipeline))
return mux
}
| {
if o.CertFile != "" && o.KeyFile != "" {
return s.ListenAndServeTLS(o.CertFile, o.KeyFile)
}
return s.ListenAndServe()
} |
log.go | // Copyright (c) 2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
import "github.com/pkt-cash/pktd/pktlog"
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log pktlog.Logger
// The default amount of logging is none.
func init() {
DisableLog()
}
// DisableLog disables all library log output. Logging output is disabled
// by default until either UseLogger or SetLogWriter are called.
func DisableLog() {
log = pktlog.Disabled
}
// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using pktlog.
func UseLogger(logger pktlog.Logger) | {
log = logger
} |
|
main.go | package main
import (
"flag"
"fmt"
"io/ioutil"
"path/filepath"
dm "github.com/zerozwt/BLiveDanmaku"
)
func main() {
sess_data := ""
jct := ""
file_name := ""
flag.StringVar(&file_name, "file", "", "picture to upload")
flag.StringVar(&sess_data, "sess_data", "", "your SESS_DATA")
flag.StringVar(&jct, "jct", "", "your JCT")
flag.Parse()
if len(sess_data) == 0 |
if len(jct) == 0 {
fmt.Println("jct is empty")
return
}
if len(file_name) == 0 {
fmt.Println("file is empty")
return
}
// open file
data, err := ioutil.ReadFile(file_name)
if err != nil {
fmt.Printf("open picture file %s failed: %v\n", file_name, err)
return
}
info, err := dm.UploadPic(data, filepath.Base(file_name), sess_data, jct)
if err != nil {
fmt.Printf("upload file to bilibili failed: %v\n", err)
return
}
fmt.Printf("uploaded file: %+v\n", info)
}
| {
fmt.Println("sess_data is empty")
return
} |
rate-line-sampler.rs | use std::env;
use std::process;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use rand::distributions::{Uniform, Distribution};
fn main() -> std::io::Result<()> {
let args: Vec<String> = env::args().collect();
if args.len() != 4 |
let sample_rate: f64 = (&args[3]).parse::<f64>().expect("<sample_rate> should be a f64.");
if sample_rate > 1.0_f64 || sample_rate < 0.0_f64 {
eprintln!("<sample_rate> invalid");
process::exit(-1);
}
let input_file = File::open(&args[1])?;
let mut output_file = File::create(&args[2])?;
let mut buf_reader = BufReader::new(&input_file);
let dis = Uniform::from(0.0f64..1.0f64);
let mut rng = rand::thread_rng();
let mut cnt: usize = 0;
loop {
let mut buf: String = String::new();
let read_len = buf_reader.read_line(&mut buf)?;
if read_len == 0 {
break;
} else {
if dis.sample(&mut rng) <= sample_rate {
cnt += 1;
output_file.write_all(buf.as_bytes())?;
}
}
}
println!("Sampling done: {} lines has been sampled.", cnt);
Ok(())
}
| {
eprintln!("Usage: rate-line-sampler <input_file> <output_file> <sample_rate>");
process::exit(-1);
} |
layouts.py | from pyramid_layout.layout import layout_config
@layout_config(template='h:templates/base.pt')
class | (object):
csp = None
inline_webfont = True
requirements = (('app', None),)
def __init__(self, context, request):
self.context = context
self.request = request
self.forms = {}
def add_form(self, form):
if form.formid in self.forms:
raise ValueError('duplicate form id "%s"' % form.formid)
self.forms[form.formid] = form
def get_widget_requirements(self):
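        # Collect the static-resource requirements declared on the layout
        # itself plus those of every form registered via add_form.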
requirements = []
requirements.extend(self.requirements)
for form in self.forms.values():
requirements.extend(form.get_widget_requirements())
return requirements
def get_widget_resources(self):
requirements = self.get_widget_requirements()
return self.request.registry.resources(requirements)
@property
def css_links(self):
return self.get_widget_resources()['css']
@property
def js_links(self):
return self.get_widget_resources()['js']
@layout_config(name='sidebar', template='h:templates/base.pt')
class SidebarLayout(BaseLayout):
requirements = (('app', None), ('sidebar', None))
def includeme(config):
config.include('pyramid_layout')
config.scan(__name__)
| BaseLayout |
__init__.py | from .document import DocumentArray
from .querylang import QueryLangArray
from .chunk import ChunkArray
from .match import MatchArray | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
|
|
litmus-portal.ts | import { createTheme } from './base';
const litmusPortalTheme = createTheme({
palette: {
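    // Colors with a trailing two-digit suffix (e.g. '#109B6710') are
    // 8-digit hex values whose last byte is alpha ('10' ~ 6%, '20' ~ 13%
    // opacity), used for tinted status and card backgrounds.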
primary: {
main: '#5B44BA',
light: '#858CDD',
dark: '#4028A0',
},
secondary: {
main: '#109B67',
light: '#858CDD',
dark: '#128359',
},
success: {
main: '#109B67',
light: '#109B6710',
dark: '#128359',
},
error: {
light: '#CA2C2C10',
main: '#CA2C2C',
dark: '#A62F28',
}, | },
background: {
default: '#FAFBFD',
paper: '#FFFFFF',
},
sidebarMenu: '#FAFBFD',
loginBackground: '#FFFFFF',
disabledBackground: '#E6E6E6',
text: {
primary: '#101217',
secondary: '#FFFFFF',
disabled: '#777777',
hint: '#777777',
},
highlight: '#5B44BA',
horizontalStepper: {
completed: '#5D6173',
active: '#2CCA8F',
pending: '#B9B9B9',
},
border: {
main: '#B9B9B9',
success: '#109B67',
error: '#CA2C2C',
},
progressBarGradient:
'linear-gradient(90.43deg, #5B44BA 0.35%, #858CDD 51.03%, #109B67 99.64%)',
status: {
running: {
      text: '#F6B92B',
      background: '#F6B92B20',
},
completed: {
text: '#109B67',
background: '#109B6720',
},
pending: {
text: '#B9B9B9',
background: '#B9B9B920',
},
failed: {
text: '#CA2C2C',
background: '#CA2C2C20',
},
},
cards: {
background: '#FFFFFF',
highlight: '#109B6710',
},
},
});
export { litmusPortalTheme }; | warning: {
light: '#F6B92B20',
main: '#F6B92B',
dark: '#402C01', |
bots.py | from boardgame import BaseBot, BaseBoard, BasePlayer, Move
from hnefatafl.engine import PieceType, variants
from hnefatafl import MODEL_PATH, MODEL_CONFIG_PATH
from alphazero.GenericPlayers import MCTSPlayer, NNPlayer
from threading import Lock
import os
import importlib
import random
import pyximport, numpy
pyximport.install(setup_args={'include_dirs': numpy.get_include()})
class AlphaZeroBot(BaseBot):
def __init__(self, player: BasePlayer, game_variant=variants.hnefatafl, use_mcts=True, use_default_args=True,
load=True, args=None, *a, **k):
super().__init__(player)
self.use_mcts = use_mcts
self.use_default_args = use_default_args
self.game_variant = game_variant
self._game = None
self._args = args
self._model_player = None
self.__a = a
self.__k = k
self.result = None
self._result_lock = Lock()
if load: self.load_model(MODEL_PATH)
def reset(self):
from alphazero.envs.tafl.train_fastafl import args
from alphazero.envs.tafl.brandubh import Game
self._args = self._args if self._args is not None else args
self._game = Game()
if self._model_player and self.use_mcts:
self._model_player.mcts.reset()
def update(self, board: BaseBoard, move: Move):
if self.use_mcts:
from alphazero.envs.tafl.brandubh import get_action
self._game._board = board
self._game._player = 2 - board.to_play().value
self._game._turns = board.num_turns
self._model_player.update(self._game, get_action(board, move))
def load_model(self, model_path: str):
from alphazero.NNetWrapper import NNetWrapper
self.reset()
nn = NNetWrapper(type(self._game), self._args)
nn.load_checkpoint('.', model_path)
self.__k['args'] = self._args if not self.use_default_args else None
if self.__k.get('verbose'): print('Loading model with args:', self.__k['args'])
cls = MCTSPlayer if self.use_mcts else NNPlayer
self._model_player = cls(type(self._game), nn, *self.__a, **self.__k)
def get_move(self, board: BaseBoard) -> Move or None:
|
class AlphaZeroFastaflBot(BaseBot):
def __init__(self, player: BasePlayer, game_variant=variants.hnefatafl, use_mcts=True, use_default_args=True,
load=True, args=None, *a, **k):
super().__init__(player)
self.use_mcts = use_mcts
self.use_default_args = use_default_args
self.game_variant = game_variant
self._game = None
self._args = args
self._model_player = None
self.__a = a
self.__k = k
self.result = None
self._result_lock = Lock()
if load: self.load_model(MODEL_PATH)
def reset(self):
from alphazero.envs.tafl.train_fastafl import args
from alphazero.envs.tafl.fastafl import Game
self._args = self._args if self._args is not None else args
self._game = Game()
if self._model_player and self.use_mcts:
self._model_player.mcts.reset()
def update(self, board: BaseBoard, move: Move):
if self.use_mcts:
from alphazero.envs.tafl.fastafl import get_action
self._game._board = board
self._game._player = 2 - board.to_play().value
self._game._turns = board.num_turns
self._model_player.update(self._game, get_action(board, move))
def load_model(self, model_path: str):
from alphazero.NNetWrapper import NNetWrapper
self.reset()
nn = NNetWrapper(type(self._game), self._args)
nn.load_checkpoint('.', model_path)
self.__k['args'] = self._args if not self.use_default_args else None
if self.__k.get('verbose'): print('Loading model with args:', self.__k['args'])
cls = MCTSPlayer if self.use_mcts else NNPlayer
self._model_player = cls(type(self._game), nn, *self.__a, **self.__k)
def get_move(self, board: BaseBoard) -> Move or None:
self.result = None
from alphazero.envs.tafl.fastafl import get_move
self._game._board = board
self._game._player = 2 - board.to_play().value
self._game._turns = board.num_turns
action = self._model_player(self._game)
move = get_move(board, action)
self._result_lock.acquire()
self.result = move
self._result_lock.release()
return move
class MuZeroBot(BaseBot):
def __init__(self, player: BasePlayer, load=True, use_new_config=True):
super().__init__(player)
self.use_new_config = use_new_config
self._model = None
self._config_module = None
self._config_game = None
self._config_params = None
self.history = None
self.result = None
self._result_lock = Lock()
if load: self.load_model(MODEL_PATH, MODEL_CONFIG_PATH)
def load_model(self, model_path, config_path=None):
from muzero_general.models import MuZeroNetwork
import torch
if not os.path.exists(model_path):
raise IOError(f'The file {model_path} does not exist, could not load model.')
if config_path and not self.use_new_config:
try:
self._config_module = importlib.import_module(
config_path.replace('.py', '') if '.py' in config_path else config_path
)
except ModuleNotFoundError:
print(f'WARNING: The default model config file {config_path} could not be loaded, using config from model file instead.')
self.use_new_config = True
else:
self._config_game = self._config_module.Game()
self._config_params = self._config_module.MuZeroConfig()
checkpoint = torch.load(model_path)
if self.use_new_config and checkpoint.get('config'): self._config_params = checkpoint['config']
self._model = MuZeroNetwork(self._config_params)
self._model.set_weights(checkpoint['weights'])
device = "cuda" if torch.cuda.is_available() else "cpu"
print('Running model on ' + device)
self._model.to(torch.device(device))
self._model.eval()
@staticmethod
def select_action(node, temperature):
"""
Select action according to the visit count distribution and the temperature.
The temperature is changed dynamically with the visit_softmax_temperature function
in the config.
"""
import numpy as np
visit_counts = np.array(
[child.visit_count for child in node.children.values()], dtype="int32"
)
actions = [action for action in node.children.keys()]
if temperature == 0:
action = actions[np.argmax(visit_counts)]
elif temperature == float("inf"):
action = np.random.choice(actions)
else:
# See paper appendix Data Generation
visit_count_distribution = visit_counts ** (1 / temperature)
visit_count_distribution = visit_count_distribution / sum(
visit_count_distribution
)
action = np.random.choice(actions, p=visit_count_distribution)
return action
def get_move(self, board: BaseBoard) -> Move or None:
from muzero_general.self_play import MCTS, GameHistory
import torch
self.result = None
observation = self._config_module.get_observation(
board,
1 if self.player.white else 0
)
if not self.history:
self.history = GameHistory()
self.history.action_history.append(0)
self.history.observation_history.append(observation)
stacked_observations = self.history.get_stacked_observations(
-1,
self._config_params.stacked_observations,
)
with torch.no_grad():
root, mcts_info = MCTS(self._config_params).run(
self._model,
stacked_observations,
self._config_game.legal_actions(board),
self._config_game.to_play(board),
True,
)
action = self.select_action(root, 0)
move = self._config_module.get_move(board, action)
self._result_lock.acquire()
self.result = move
self._result_lock.release()
        for k, v in mcts_info.items():
            print(f'{k}: {v} ', end='')
print()
self.history.store_search_statistics(root, self._config_params.action_space)
self.history.action_history.append(action)
self.history.observation_history.append(observation)
return move
class RandomBot(BaseBot):
def get_move(self, board: BaseBoard) -> Move or None:
if not board.all_valid_moves(PieceType.white if self.player.white else PieceType.black): return
if self.player.white:
pieces = [p for p in board.pieces if p.is_white]
else:
pieces = [p for p in board.pieces if p.is_black]
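        # The all_valid_moves check above guarantees at least one piece of
        # this color has a legal move, so the retry loop below terminates.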
while True:
piece = random.choice(pieces)
moves = list(board.valid_moves(piece))
if moves: break
return random.choice(moves)
| self.result = None
from alphazero.envs.tafl.brandubh import get_move
self._game._board = board
self._game._player = 2 - board.to_play().value
self._game._turns = board.num_turns
action = self._model_player(self._game)
move = get_move(board, action)
self._result_lock.acquire()
self.result = move
self._result_lock.release()
return move |
dbpediaoccupation.py | """
This file offers the methods to automatically retrieve the graph dbpedia-occupation.
The graph is automatically retrieved from the NetworkRepository repository.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def | (
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/networkrepository",
version: str = "latest",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the dbpedia-occupation graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "latest"
The version of the graph to retrieve.
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of dbpedia-occupation graph.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="DbpediaOccupation",
repository="networkrepository",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| DbpediaOccupation |
hit_test.go | // Copyright (c) 2015, Marian Kopriva
// All rights reserved.
// Licensed under BSD, see LICENSE for details.
package hit
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
)
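// Each case below is run end-to-end against the httptest server in
// TestRequestExecute; expected errors embed the ANSI color codes the
// package emits, hence the YellowColor/RedColor/StopColor fixtures.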
var requestExecuteTests = []struct {
method string
path string
r Request
err error
}{
{"GET", "/foo/bar", Request{false, nil, nil, Response{200, nil, nil}}, nil},
{"GET", "/foo/bar", Request{false, Header{"Auth": {"6tygfd4"}}, nil, Response{
201,
Header{"Foo": {"baz"}},
JSONBody{"Hello": "World"},
}}, fmt.Errorf(
" %sGET /foo/bar%s Header: %smap[Auth:[6tygfd4]]%s\n"+
"StatusCode got = %s200%s, want %s201%s\n"+
"Header[\"Foo\"] got = %s\"\"%s, want = %s\"baz\"%s\n"+
"Body got %smap[string]interface {}{\"foo\":\"bar\"}%s, want %smap[string]interface {}{\"Hello\":\"World\"}%s\n",
YellowColor, StopColor, YellowColor, StopColor,
RedColor, StopColor, RedColor, StopColor,
RedColor, StopColor, RedColor, StopColor,
RedColor, StopColor, RedColor, StopColor,
)},
}
func TestRequestExecute(t *testing.T) {
http.HandleFunc("/foo/bar", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
fmt.Fprint(w, `{"foo":"bar"}`)
})
ts := httptest.NewServer(http.DefaultServeMux)
defer ts.Close()
Addr = ts.URL[len("http://"):]
for i, tt := range requestExecuteTests {
err := tt.r.Execute(tt.method, tt.path)
if !reflect.DeepEqual(err, tt.err) {
t.Errorf("#%d: err got: \"%v\"\nwant: \"%v\"", i, err, tt.err)
}
}
}
var responseCompareTests = []struct {
r Response
res *http.Response
want error
}{
{
Response{200, nil, nil}, &http.Response{StatusCode: 200}, nil,
}, {
Response{400, nil, nil}, &http.Response{StatusCode: 404},
fmt.Errorf("StatusCode got = %s404%s, want %s400%s\n", RedColor, StopColor, RedColor, StopColor),
}, {
Response{200, Header{"Foo": {"bar"}}, nil},
&http.Response{StatusCode: 200, Header: http.Header{"Foo": {"bar"}}},
nil,
}, {
Response{200, Header{"Foo": {"bar"}}, nil},
&http.Response{StatusCode: 200, Header: http.Header{"Foo": {"baz"}}},
fmt.Errorf("Header[\"Foo\"] got = %s\"baz\"%s, want = %s\"bar\"%s\n", RedColor, StopColor, RedColor, StopColor),
}, {
Response{200, nil, JSONBody{"Hello": "World"}},
&http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(`{"Hello":"World"}`))},
nil,
}, {
Response{200, nil, JSONBody{"Hello": "World"}},
&http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(`{"olleH":"dlroW"}`))},
fmt.Errorf("Body got %smap[string]interface {}{\"olleH\":\"dlroW\"}%s, want %smap[string]interface {}{\"Hello\":\"World\"}%s\n", RedColor, StopColor, RedColor, StopColor),
}, {
Response{200, Header{"Foo": {"bar"}}, JSONBody{"Hello": "World"}},
&http.Response{StatusCode: 200, Header: http.Header{"Foo": {"bar"}}, Body: ioutil.NopCloser(strings.NewReader(`{"Hello":"World"}`))},
nil,
}, {
Response{400, Header{"Foo": {"bar"}}, JSONBody{"Hello": "World"}},
&http.Response{StatusCode: 404, Header: http.Header{"Foo": {"baz"}}, Body: ioutil.NopCloser(strings.NewReader(`{"olleH":"dlroW"}`))},
fmt.Errorf("%s%s%s",
fmt.Sprintf("StatusCode got = %s404%s, want %s400%s\n", RedColor, StopColor, RedColor, StopColor),
fmt.Sprintf("Header[\"Foo\"] got = %s\"baz\"%s, want = %s\"bar\"%s\n", RedColor, StopColor, RedColor, StopColor),
fmt.Sprintf("Body got %smap[string]interface {}{\"olleH\":\"dlroW\"}%s, want %smap[string]interface {}{\"Hello\":\"World\"}%s\n", RedColor, StopColor, RedColor, StopColor),
),
},
}
func TestResponseCompare(t *testing.T) {
//t.SkipNow()
for i, tt := range responseCompareTests {
got := tt.r.Compare(tt.res)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("#%d: got: \"%s\"\nwant: \"%s\"", i, got, tt.want)
}
}
}
func TestHeaderAddTo(t *testing.T) {
//t.SkipNow()
h := Header{"A": {"foo"}, "B": {"bar", "baz"}}
r := &http.Request{Header: http.Header{}}
h.AddTo(r)
want := http.Header{"A": {"foo"}, "B": {"bar", "baz"}}
if !reflect.DeepEqual(r.Header, want) {
t.Errorf("got %v, want %v", r.Header, want)
}
}
func TestHeaderCompare(t *testing.T) {
//t.SkipNow()
h := Header{"A": {"foo"}, "B": {"bar"}}
hh := http.Header{"A": {"foo", "bar"}, "C": {"helloworld"}, "B": {"bar"}}
if err := h.Compare(hh); err != nil {
t.Errorf("got err %v, want <nil>", err)
}
h = Header{"X": {"foo"}, "B": {"baz"}}
want := []string{
fmt.Sprintf(`Header["X"] got = %s""%s, want = %s"foo"%s`, RedColor, StopColor, RedColor, StopColor),
fmt.Sprintf(`Header["B"] got = %s"bar"%s, want = %s"baz"%s`, RedColor, StopColor, RedColor, StopColor),
}
if err := h.Compare(hh); err != nil {
for _, w := range want {
if !strings.Contains(err.Error(), w) {
t.Errorf("error got %v, should contain %q", err, w)
}
}
} else {
t.Error("got err <nil>, want err")
}
}
var bodyerTests = []struct {
bodyer Bodyer
wantType string
wantBody string
err error
}{
{
JSONBody{"x": 123, "y": 0.87654003, "numbers": []interface{}{5, 2, 34}}, appjson,
`{"numbers":[5,2,34],"x":123,"y":0.87654003}`, nil,
},
{
JSONBody{"str": "foobar", "arr": []string{"foo", "bar"}}, appjson,
`{"arr":["foo","bar"],"str":"foobar"}`, nil,
},
{
JSONBody{"obj": map[string]interface{}{"A": "hello", "B": 543, "C": true}}, appjson,
`{"obj":{"A":"hello","B":543,"C":true}}`, nil,
},
{
FormBody{"A": {"foo"}, "C": {"123"}, "B": {"bar", "baz"}}, urlencoded,
`A=foo&B=bar&B=baz&C=123`, nil,
},
{
MultipartBody{"A": {"foo", "bar"}}, multi,
"--testboundary\r\nContent-Disposition: form-data; name=\"A\"\r\n\r\nfoo\r\n--testboundary\r\nContent-Disposition: form-data; name=\"A\"\r\n\r\nbar\r\n--testboundary--\r\n", nil,
},
{
// TODO:(mkopriva) randomly fails/passes as the file's headers Content-Disposition
// and Content-Type are not always serialized in the same order.
MultipartBody{"A": {"foo", File{"text/plain", "hit-test.txt", "Test file content."}}}, multi,
"--testboundary\r\nContent-Disposition: form-data; name=\"A\"\r\n\r\nfoo\r\n--testboundary\r\nContent-Disposition: form-data; name=\"A\"; filename=\"hit-test.txt\"\r\nContent-Type: text/plain\r\n\r\nTest file content.\r\n--testboundary--\r\n", nil,
},
}
func TestBodyer(t *testing.T) {
//t.SkipNow()
for i, tt := range bodyerTests {
if got, want := tt.bodyer.Type(), tt.wantType; got != want {
t.Errorf("#%d: type got %q, want %q", i, got, want)
}
r, err := tt.bodyer.Body()
if err != tt.err {
t.Errorf("#%d: err got %v, want %v", i, err, tt.err)
}
b, err := ioutil.ReadAll(r)
if err != nil {
t.Errorf("#%d: ioutil.ReadAll got err %v, want <nil>", i, err)
}
if got, want := string(b), tt.wantBody; got != want {
t.Errorf("#%d: body got %q, want %q", i, got, want)
}
}
}
func TestRequestsSkip(t *testing.T) | {
got := Requests{
"GET": {{}, {Skip: true}, {}},
"POST": {{}, {}},
}.Skip()
want := Requests{
"GET": {{Skip: true}, {Skip: true}, {Skip: true}},
"POST": {{Skip: true}, {Skip: true}},
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
} |
|
aggregator.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"context"
"crypto/rand"
"encoding/json"
"fmt"
"math/big"
"net"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
clientset "k8s.io/client-go/kubernetes"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
"k8s.io/utils/pointer"
"github.com/onsi/ginkgo"
)
const (
aggregatorServicePort = 7443
)
var _ = SIGDescribe("Aggregator", func() {
var ns string
var c clientset.Interface
var aggrclient *aggregatorclient.Clientset
// BeforeEachs run in LIFO order, AfterEachs run in FIFO order.
// We want cleanTest to happen before the namespace cleanup AfterEach
// inserted by NewDefaultFramework, so we put this AfterEach in front
// of NewDefaultFramework.
ginkgo.AfterEach(func() {
cleanTest(c, aggrclient, ns)
})
f := framework.NewDefaultFramework("aggregator")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
// We want namespace initialization BeforeEach inserted by
// NewDefaultFramework to happen before this, so we put this BeforeEach
// after NewDefaultFramework.
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
if aggrclient == nil {
config, err := framework.LoadConfig()
if err != nil {
framework.Failf("could not load config: %v", err)
}
aggrclient, err = aggregatorclient.NewForConfig(config)
if err != nil {
framework.Failf("could not create aggregator client: %v", err)
}
}
})
/*
Release: v1.17, v1.21
Testname: aggregator-supports-the-sample-apiserver
Description: Ensure that the sample-apiserver code from 1.17 and compiled against 1.17
will work on the current Aggregator/API-Server.
*/
framework.ConformanceIt("Should be able to support the 1.17 Sample API Server using the current Aggregator", func() {
// Testing a 1.17 version of the sample-apiserver
TestSampleAPIServer(f, aggrclient, imageutils.GetE2EImage(imageutils.APIServer))
})
})
func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientset, namespace string) {
// delete the APIService first to avoid causing discovery errors
_ = aggrclient.ApiregistrationV1().APIServices().Delete(context.TODO(), "v1alpha1.wardle.example.com", metav1.DeleteOptions{})
	// this simple sleep makes sure that the sample api server is unregistered from all Kube APIs before tearing down the deployment (otherwise it could make the test fail)
// a more expensive way of doing it would be checking if the sample server was unregistered from all deployed Kube API servers before tearing down the deployment.
framework.Logf("sleeping 45 seconds before deleting the sample-apiserver deployment, see %q for more", "https://bugzilla.redhat.com/show_bug.cgi?id=1933144")
time.Sleep(time.Second * 45)
_ = client.AppsV1().Deployments(namespace).Delete(context.TODO(), "sample-apiserver-deployment", metav1.DeleteOptions{})
_ = client.CoreV1().Secrets(namespace).Delete(context.TODO(), "sample-apiserver-secret", metav1.DeleteOptions{})
_ = client.CoreV1().Services(namespace).Delete(context.TODO(), "sample-api", metav1.DeleteOptions{})
_ = client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), "sample-apiserver", metav1.DeleteOptions{})
_ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), "wardler-auth-reader", metav1.DeleteOptions{})
_ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":auth-delegator", metav1.DeleteOptions{})
_ = client.RbacV1().ClusterRoles().Delete(context.TODO(), "sample-apiserver-reader", metav1.DeleteOptions{})
_ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":sample-apiserver-reader", metav1.DeleteOptions{})
}
// TestSampleAPIServer is a basic test of whether the sample-apiserver code from 1.17, compiled against 1.17,
// will work on the current Aggregator/API-Server.
func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Clientset, image string) {
ginkgo.By("Registering the sample API server.")
client := f.ClientSet
restClient := client.Discovery().RESTClient()
namespace := f.Namespace.Name
certCtx := setupServerCert(namespace, "sample-api")
// kubectl create -f namespace.yaml
// NOTE: aggregated apis should generally be set up in their own namespace. As the test framework is setting up a new namespace, we are just using that.
// kubectl create -f secret.yaml
secretName := "sample-apiserver-secret"
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
},
Type: v1.SecretTypeOpaque,
Data: map[string][]byte{
"tls.crt": certCtx.cert,
"tls.key": certCtx.key,
},
}
_, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
// kubectl create -f clusterrole.yaml
_, err = client.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver-reader"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups("").Resources("namespaces").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups("admissionregistration.k8s.io").Resources("*").RuleOrDie(),
},
}, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating cluster role %s", "sample-apiserver-reader")
_, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler:" + namespace + ":sample-apiserver-reader",
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "sample-apiserver-reader",
},
Subjects: []rbacv1.Subject{
{
APIGroup: "",
Kind: "ServiceAccount",
Name: "default",
Namespace: namespace,
},
},
}, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":sample-apiserver-reader")
// kubectl create -f authDelegator.yaml
_, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler:" + namespace + ":auth-delegator",
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:auth-delegator",
},
Subjects: []rbacv1.Subject{
{
APIGroup: "",
Kind: "ServiceAccount",
Name: "default",
Namespace: namespace,
},
},
}, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":auth-delegator")
// kubectl create -f deploy.yaml
deploymentName := "sample-apiserver-deployment"
etcdImage := imageutils.GetE2EImage(imageutils.Etcd)
podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
replicas := int32(1)
etcdLocalhostAddress := "127.0.0.1"
if framework.TestContext.ClusterIsIPv6() {
etcdLocalhostAddress = "::1"
}
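	// net.JoinHostPort brackets IPv6 literals (e.g. "[::1]:2379"), so the
	// same URL construction works for both address families.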
etcdURL := fmt.Sprintf("http://%s", net.JoinHostPort(etcdLocalhostAddress, "2379"))
mounts := []v1.VolumeMount{
{
Name: "apiserver-certs",
ReadOnly: true,
MountPath: "/apiserver.local.config/certificates",
},
}
volumes := []v1.Volume{
{
Name: "apiserver-certs",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{SecretName: secretName},
},
},
}
containers := []v1.Container{
{
Name: "sample-apiserver",
VolumeMounts: mounts,
Args: []string{
fmt.Sprintf("--etcd-servers=%s", etcdURL),
"--tls-cert-file=/apiserver.local.config/certificates/tls.crt",
"--tls-private-key-file=/apiserver.local.config/certificates/tls.key",
"--audit-log-path=-",
"--audit-log-maxage=0",
"--audit-log-maxbackup=0",
},
Image: image,
},
{
Name: "etcd",
Image: etcdImage,
Command: []string{
"/usr/local/bin/etcd",
"--listen-client-urls",
etcdURL,
"--advertise-client-urls",
etcdURL,
},
},
}
d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, "", "", appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.Template.Spec.Containers = containers
d.Spec.Template.Spec.Volumes = volumes
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
err = e2edeployment.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", etcdImage, deploymentName, namespace)
// kubectl create -f service.yaml
serviceLabels := map[string]string{"apiserver": "true"}
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: "sample-api",
Labels: map[string]string{"test": "aggregator"},
},
Spec: v1.ServiceSpec{
Selector: serviceLabels,
Ports: []v1.ServicePort{
{
Protocol: "TCP",
Port: aggregatorServicePort,
TargetPort: intstr.FromInt(443),
},
},
},
}
_, err = client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating service %s in namespace %s", "sample-apiserver", namespace)
// kubectl create -f serviceAccount.yaml
sa := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver"}}
_, err = client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), sa, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating service account %s in namespace %s", "sample-apiserver", namespace)
// kubectl create -f auth-reader.yaml
_, err = client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler-auth-reader",
Annotations: map[string]string{
rbacv1.AutoUpdateAnnotationKey: "true",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "",
Kind: "Role",
Name: "extension-apiserver-authentication-reader",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: "default",
Namespace: namespace,
},
},
}, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating role binding %s:sample-apiserver to access configMap", namespace)
// Wait for the extension apiserver to be up and healthy
// kubectl get deployments -n <aggregated-api-namespace> && status == Running
// NOTE: aggregated apis should generally be set up in their own namespace (<aggregated-api-namespace>). As the test framework
// is setting up a new namespace, we are just using that.
err = e2edeployment.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace)
// kubectl create -f apiservice.yaml
_, err = aggrclient.ApiregistrationV1().APIServices().Create(context.TODO(), &apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.wardle.example.com"},
Spec: apiregistrationv1.APIServiceSpec{
Service: &apiregistrationv1.ServiceReference{
Namespace: namespace,
Name: "sample-api",
Port: pointer.Int32Ptr(aggregatorServicePort),
},
Group: "wardle.example.com",
Version: "v1alpha1",
CABundle: certCtx.signingCert,
GroupPriorityMinimum: 2000,
VersionPriority: 200,
},
}, metav1.CreateOptions{})
framework.ExpectNoError(err, "creating apiservice %s with namespace %s", "v1alpha1.wardle.example.com", namespace)
var (
currentAPIService *apiregistrationv1.APIService
currentPods *v1.PodList
)
err = pollTimed(100*time.Millisecond, 60*time.Second, func() (bool, error) {
currentAPIService, _ = aggrclient.ApiregistrationV1().APIServices().Get(context.TODO(), "v1alpha1.wardle.example.com", metav1.GetOptions{})
currentPods, _ = client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
request := restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders")
request.SetHeader("Accept", "application/json")
_, err := request.DoRaw(context.TODO())
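			// 403/503 (and 404 "could not find the requested resource")
			// are expected while the APIService registration propagates,
			// so keep polling; any other error aborts the wait.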
if err != nil {
status, ok := err.(*apierrors.StatusError)
if !ok {
return false, err
}
if status.Status().Code == 403 || status.Status().Code == 503 {
return false, nil
}
if status.Status().Code == 404 && strings.HasPrefix(err.Error(), "the server could not find the requested resource") {
return false, nil
}
return false, err
}
return true, nil
}, "Waited %s for the sample-apiserver to be ready to handle requests.")
if err != nil {
currentAPIServiceJSON, _ := json.Marshal(currentAPIService)
framework.Logf("current APIService: %s", string(currentAPIServiceJSON))
currentPodsJSON, _ := json.Marshal(currentPods)
framework.Logf("current pods: %s", string(currentPodsJSON))
if currentPods != nil {
for _, pod := range currentPods.Items {
for _, container := range pod.Spec.Containers { | }
}
}
framework.ExpectNoError(err, "gave up waiting for apiservice wardle to come up successfully")
flunderName := generateFlunderName("rest-flunder")
// kubectl create -f flunders-1.yaml -v 9
// curl -k -v -XPOST https://localhost/apis/wardle.example.com/v1alpha1/namespaces/default/flunders
// Request Body: {"apiVersion":"wardle.example.com/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"test-flunder","namespace":"default"}}
flunder := `{"apiVersion":"wardle.example.com/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"` + flunderName + `","namespace":"default"}}`
result := restClient.Post().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").Body([]byte(flunder)).SetHeader("Accept", "application/json").Do(context.TODO())
framework.ExpectNoError(result.Error(), "creating a new flunders resource")
var statusCode int
result.StatusCode(&statusCode)
if statusCode != 201 {
framework.Failf("Flunders client creation response was status %d, not 201", statusCode)
}
u := &unstructured.Unstructured{}
if err := result.Into(u); err != nil {
framework.ExpectNoError(err, "reading created response")
}
framework.ExpectEqual(u.GetAPIVersion(), "wardle.example.com/v1alpha1")
framework.ExpectEqual(u.GetKind(), "Flunder")
framework.ExpectEqual(u.GetName(), flunderName)
pods, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "getting pods for flunders service")
// kubectl get flunders -v 9
// curl -k -v -XGET https://localhost/apis/wardle.example.com/v1alpha1/namespaces/default/flunders
contents, err := restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw(context.TODO())
framework.ExpectNoError(err, "attempting to get a newly created flunders resource")
var flundersList samplev1alpha1.FlunderList
err = json.Unmarshal(contents, &flundersList)
validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.example.com/v1alpha1")
if len(flundersList.Items) != 1 {
framework.Failf("failed to get back the correct flunders list %v", flundersList)
}
// kubectl delete flunder test-flunder -v 9
// curl -k -v -XDELETE https://35.193.112.40/apis/wardle.example.com/v1alpha1/namespaces/default/flunders/test-flunder
_, err = restClient.Delete().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders/" + flunderName).DoRaw(context.TODO())
validateErrorWithDebugInfo(f, err, pods, "attempting to delete a newly created flunders(%v) resource", flundersList.Items)
// kubectl get flunders -v 9
// curl -k -v -XGET https://localhost/apis/wardle.example.com/v1alpha1/namespaces/default/flunders
contents, err = restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw(context.TODO())
framework.ExpectNoError(err, "confirming delete of a newly created flunders resource")
err = json.Unmarshal(contents, &flundersList)
validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.example.com/v1alpha1")
if len(flundersList.Items) != 0 {
framework.Failf("failed to get back the correct deleted flunders list %v", flundersList)
}
flunderName = generateFlunderName("dynamic-flunder")
// Rerun the Create/List/Delete tests using the Dynamic client.
resources, discoveryErr := client.Discovery().ServerPreferredNamespacedResources()
groupVersionResources, err := discovery.GroupVersionResources(resources)
framework.ExpectNoError(err, "getting group version resources for dynamic client")
gvr := schema.GroupVersionResource{Group: "wardle.example.com", Version: "v1alpha1", Resource: "flunders"}
_, ok := groupVersionResources[gvr]
if !ok {
framework.Failf("could not find group version resource for dynamic client and wardle/flunders (discovery error: %v, discovery results: %#v)", discoveryErr, groupVersionResources)
}
dynamicClient := f.DynamicClient.Resource(gvr).Namespace(namespace)
// kubectl create -f flunders-1.yaml
// Request Body: {"apiVersion":"wardle.example.com/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"test-flunder","namespace":"default"}}
testFlunder := samplev1alpha1.Flunder{
TypeMeta: metav1.TypeMeta{
Kind: "Flunder",
APIVersion: "wardle.example.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{Name: flunderName},
Spec: samplev1alpha1.FlunderSpec{},
}
jsonFlunder, err := json.Marshal(testFlunder)
framework.ExpectNoError(err, "marshalling test-flunder for create using dynamic client")
unstruct := &unstructuredv1.Unstructured{}
err = unstruct.UnmarshalJSON(jsonFlunder)
framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
_, err = dynamicClient.Create(context.TODO(), unstruct, metav1.CreateOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
// kubectl get flunders
unstructuredList, err := dynamicClient.List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
if len(unstructuredList.Items) != 1 {
framework.Failf("failed to get back the correct flunders list %v from the dynamic client", unstructuredList)
}
ginkgo.By("Read Status for v1alpha1.wardle.example.com")
statusContent, err := restClient.Get().
AbsPath("/apis/apiregistration.k8s.io/v1/apiservices/v1alpha1.wardle.example.com/status").
SetHeader("Accept", "application/json").DoRaw(context.TODO())
framework.ExpectNoError(err, "No response for .../apiservices/v1alpha1.wardle.example.com/status. Error: %v", err)
var jr *apiregistrationv1.APIService
err = json.Unmarshal([]byte(statusContent), &jr)
framework.ExpectNoError(err, "Failed to process statusContent: %v | err: %v ", string(statusContent), err)
framework.ExpectEqual(jr.Status.Conditions[0].Message, "all checks passed", "The Message returned was %v", jr.Status.Conditions[0].Message)
ginkgo.By("kubectl patch apiservice v1alpha1.wardle.example.com -p '{\"spec\":{\"versionPriority\": 400}}'")
patchContent, err := restClient.Patch(types.MergePatchType).
AbsPath("/apis/apiregistration.k8s.io/v1/apiservices/v1alpha1.wardle.example.com").
SetHeader("Accept", "application/json").
Body([]byte(`{"spec":{"versionPriority": 400}}`)).DoRaw(context.TODO())
framework.ExpectNoError(err, "Patch failed for .../apiservices/v1alpha1.wardle.example.com. Error: %v", err)
err = json.Unmarshal([]byte(patchContent), &jr)
framework.ExpectNoError(err, "Failed to process patchContent: %v | err: %v ", string(patchContent), err)
framework.ExpectEqual(jr.Spec.VersionPriority, int32(400), "The VersionPriority returned was %d", jr.Spec.VersionPriority)
ginkgo.By("List APIServices")
listApiservices, err := restClient.Get().
AbsPath("/apis/apiregistration.k8s.io/v1/apiservices").
SetHeader("Accept", "application/json").DoRaw(context.TODO())
framework.ExpectNoError(err, "No response for /apis/apiregistration.k8s.io/v1/apiservices Error: %v", err)
var list *apiregistrationv1.APIServiceList
err = json.Unmarshal([]byte(listApiservices), &list)
framework.ExpectNoError(err, "Failed to process APIServiceList: %v | err: %v ", list, err)
locatedWardle := false
for _, item := range list.Items {
if item.Name == "v1alpha1.wardle.example.com" {
framework.Logf("Found v1alpha1.wardle.example.com in APIServiceList")
locatedWardle = true
break
}
}
if !locatedWardle {
framework.Failf("Unable to find v1alpha1.wardle.example.com in APIServiceList")
}
// kubectl delete flunder test-flunder
err = dynamicClient.Delete(context.TODO(), flunderName, metav1.DeleteOptions{})
validateErrorWithDebugInfo(f, err, pods, "deleting flunders(%v) using dynamic client", unstructuredList.Items)
// kubectl get flunders
unstructuredList, err = dynamicClient.List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
if len(unstructuredList.Items) != 0 {
framework.Failf("failed to get back the correct deleted flunders list %v from the dynamic client", unstructuredList)
}
cleanTest(client, aggrclient, namespace)
}
// pollTimed will call Poll but time how long Poll actually took.
// It will then framework.Logf the msg with the duration of the Poll.
// It is assumed that msg will contain one %s for the elapsed time.
func pollTimed(interval, timeout time.Duration, condition wait.ConditionFunc, msg string) error {
defer func(start time.Time, msg string) {
elapsed := time.Since(start)
framework.Logf(msg, elapsed)
}(time.Now(), msg)
return wait.Poll(interval, timeout, condition)
}
func validateErrorWithDebugInfo(f *framework.Framework, err error, pods *v1.PodList, msg string, fields ...interface{}) {
if err != nil {
namespace := f.Namespace.Name
msg := fmt.Sprintf(msg, fields...)
msg += fmt.Sprintf(" but received unexpected error:\n%v", err)
client := f.ClientSet
ep, err := client.CoreV1().Endpoints(namespace).Get(context.TODO(), "sample-api", metav1.GetOptions{})
if err == nil {
msg += fmt.Sprintf("\nFound endpoints for sample-api:\n%v", ep)
}
pds, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
if err == nil {
msg += fmt.Sprintf("\nFound pods in %s:\n%v", namespace, pds)
msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods)
}
framework.Failf(msg)
}
}
func generateFlunderName(base string) string {
id, err := rand.Int(rand.Reader, big.NewInt(2147483647))
if err != nil {
return base
}
return fmt.Sprintf("%s-%d", base, id)
} | logs, err := e2epod.GetPodLogs(client, namespace, pod.Name, container.Name)
framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
} |
collect.go | package cli
import (
"encoding/json"
"github.com/cosmos/cosmos-sdk/client/flags"
"path/filepath"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/tendermint/tendermint/libs/cli"
tmtypes "github.com/tendermint/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/server"
"github.com/omexapp/omexchain/x/genutil"
)
const flagGenTxDir = "gentx-dir"
// CollectGenTxsCmd returns the cobra command to collect genesis transactions
func CollectGenTxsCmd(ctx *server.Context, cdc *codec.Codec,
genAccIterator genutil.GenesisAccountsIterator, defaultNodeHome string) *cobra.Command | {
cmd := &cobra.Command{
Use: "collect-gentxs",
Short: "Collect genesis txs and output a genesis.json file",
RunE: func(_ *cobra.Command, _ []string) error {
config := ctx.Config
config.SetRoot(viper.GetString(cli.HomeFlag))
name := viper.GetString(flags.FlagName)
nodeID, valPubKey, err := genutil.InitializeNodeValidatorFiles(config)
if err != nil {
return err
}
genDoc, err := tmtypes.GenesisDocFromFile(config.GenesisFile())
if err != nil {
return err
}
genTxsDir := viper.GetString(flagGenTxDir)
if genTxsDir == "" {
genTxsDir = filepath.Join(config.RootDir, "config", "gentx")
}
toPrint := newPrintInfo(config.Moniker, genDoc.ChainID, nodeID, genTxsDir, json.RawMessage(""))
initCfg := genutil.NewInitConfig(genDoc.ChainID, genTxsDir, name, nodeID, valPubKey)
appMessage, err := genutil.GenAppStateFromConfig(cdc, config, initCfg, *genDoc, genAccIterator)
if err != nil {
return err
}
toPrint.AppMessage = appMessage
// print out some key information
return displayInfo(cdc, toPrint)
},
}
cmd.Flags().String(cli.HomeFlag, defaultNodeHome, "node's home directory")
cmd.Flags().String(flagGenTxDir, "",
"override default \"gentx\" directory from which collect and execute "+
"genesis transactions; default [--home]/config/gentx/")
return cmd
} |
|
main.go | package main
import "github.com/qba73/gosi"
func main() | {
gosi.RunCLI()
} |
|
mod.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
pub mod backup_service_client;
pub mod read_record_bytes;
pub mod storage_ext;
pub(crate) mod stream;
#[cfg(test)]
pub mod test_utils;
use anyhow::{anyhow, Result};
use libra_config::config::RocksdbConfig;
use libra_crypto::HashValue;
use libra_infallible::duration_since_epoch;
use libra_jellyfish_merkle::{restore::JellyfishMerkleRestore, NodeBatch, TreeWriter};
use libra_types::transaction::Version;
use libradb::{backup::restore_handler::RestoreHandler, GetRestoreHandler, LibraDB};
use std::{
convert::TryFrom,
mem::size_of,
path::{Path, PathBuf},
sync::Arc,
};
use structopt::StructOpt;
use tokio::fs::metadata;
#[derive(Clone, StructOpt)]
pub struct GlobalBackupOpt {
// Defaults to 128MB, so concurrent chunk downloads won't take up too much memory.
#[structopt(
long = "max-chunk-size",
default_value = "134217728",
help = "Maximum chunk file size in bytes."
)]
pub max_chunk_size: usize,
}
#[derive(Clone, StructOpt)]
pub struct RocksdbOpt {
// using a smaller value than a node since we don't care much about reading performance
// in this tool.
#[structopt(long, default_value = "1000")]
max_open_files: i32,
// using the same default with a node (1GB).
#[structopt(long, default_value = "1073741824")]
max_total_wal_size: u64,
}
impl From<RocksdbOpt> for RocksdbConfig {
fn from(opt: RocksdbOpt) -> Self {
Self {
max_open_files: opt.max_open_files,
max_total_wal_size: opt.max_total_wal_size,
}
}
}
impl Default for RocksdbOpt {
fn default() -> Self {
Self::from_iter(vec!["exe"])
}
}
#[derive(Clone, StructOpt)]
pub struct GlobalRestoreOpt {
#[structopt(long, help = "Dry run without writing data to DB.")]
pub dry_run: bool,
#[structopt(
long = "target-db-dir",
parse(from_os_str),
conflicts_with = "dry-run",
required_unless = "dry-run"
)]
pub db_dir: Option<PathBuf>,
#[structopt(
long,
help = "Content newer than this version will not be recovered to DB, \
defaulting to the largest version possible, meaning recover everything in the backups."
)]
pub target_version: Option<Version>,
#[structopt(flatten)]
pub rocksdb_opt: RocksdbOpt,
}
pub enum RestoreRunMode {
Restore { restore_handler: RestoreHandler },
Verify,
}
struct MockTreeWriter;
impl TreeWriter for MockTreeWriter {
fn write_node_batch(&self, _node_batch: &NodeBatch) -> Result<()> {
Ok(())
}
}
impl RestoreRunMode {
pub fn name(&self) -> &'static str {
match self {
Self::Restore { restore_handler: _ } => "restore", | Self::Verify => "verify",
}
}
pub fn is_verify(&self) -> bool {
match self {
Self::Restore { restore_handler: _ } => false,
Self::Verify => true,
}
}
pub fn get_state_restore_receiver(
&self,
version: Version,
expected_root_hash: HashValue,
) -> Result<JellyfishMerkleRestore> {
match self {
Self::Restore { restore_handler } => {
restore_handler.get_state_restore_receiver(version, expected_root_hash)
}
Self::Verify => JellyfishMerkleRestore::new_overwrite(
Arc::new(MockTreeWriter),
version,
expected_root_hash,
),
}
}
}
#[derive(Clone)]
pub struct GlobalRestoreOptions {
pub target_version: Version,
pub run_mode: Arc<RestoreRunMode>,
}
impl TryFrom<GlobalRestoreOpt> for GlobalRestoreOptions {
type Error = anyhow::Error;
fn try_from(opt: GlobalRestoreOpt) -> Result<Self> {
let target_version = opt.target_version.unwrap_or(Version::max_value());
let run_mode = if let Some(db_dir) = &opt.db_dir {
let restore_handler = Arc::new(LibraDB::open(
db_dir,
false, /* read_only */
None, /* pruner */
opt.rocksdb_opt.into(),
)?)
.get_restore_handler();
RestoreRunMode::Restore { restore_handler }
} else {
RestoreRunMode::Verify
};
Ok(Self {
target_version,
run_mode: Arc::new(run_mode),
})
}
}
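/// A chunk is cut when appending `record` plus its u32 length prefix would
/// push a non-empty chunk past `max_chunk_size`; a single oversized record
/// therefore still ends up in a chunk of its own.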
pub(crate) fn should_cut_chunk(chunk: &[u8], record: &[u8], max_chunk_size: usize) -> bool {
!chunk.is_empty() && chunk.len() + record.len() + size_of::<u32>() > max_chunk_size
}
// TODO: use Path::exists() when Rust 1.5 stabilizes.
pub(crate) async fn path_exists(path: &PathBuf) -> bool {
metadata(&path).await.is_ok()
}
pub(crate) trait PathToString {
fn path_to_string(&self) -> Result<String>;
}
impl<T: AsRef<Path>> PathToString for T {
fn path_to_string(&self) -> Result<String> {
self.as_ref()
.to_path_buf()
.into_os_string()
.into_string()
.map_err(|s| anyhow!("into_string failed for OsString '{:?}'", s))
}
}
pub(crate) fn unix_timestamp_sec() -> i64 {
duration_since_epoch().as_secs() as i64
} | |
main.rs | #![deny(unsafe_code)]
#![no_main]
#![no_std]
#[allow(unused_imports)]
use aux14::{entry, iprint, iprintln, prelude::*};
// Slave address
const MAGNETOMETER: u16 = 0b0011_1100;
// Addresses of the magnetometer's registers
const OUT_X_H_M: u8 = 0x03;
const IRA_REG_M: u8 = 0x0A;
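// IRA_REG_M is an identification register with a fixed, documented value
// (0b0100_1000, ASCII 'H'), which is why the expected output printed at
// the bottom of this file is deterministic.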
#[entry]
fn | () -> ! {
let (i2c1, _delay, mut itm) = aux14::init();
// Stage 1: Send the address of the register we want to read to the
// magnetometer
{
// TODO Broadcast START
// TODO Broadcast the MAGNETOMETER address with the R/W bit set to Write
// TODO Send the address of the register that we want to read: IRA_REG_M
}
// Stage 2: Receive the contents of the register we asked for
let byte = {
// TODO Broadcast RESTART
// TODO Broadcast the MAGNETOMETER address with the R/W bit set to Read
// TODO Receive the contents of the register
// TODO Broadcast STOP
0
};
// Expected output: 0x0A - 0b01001000
iprintln!(&mut itm.stim[0], "0x{:02X} - 0b{:08b}", IRA_REG_M, byte);
loop {}
}
| main |
compare_speed_with_pytorch.py | import numpy as np
import time
import tempfile
import os
import importlib.util
import argparse
from typing import Sequence
import subprocess
import re
import oneflow as flow
import oneflow._oneflow_internal as oneflow_internal
DEFAULT_TIMES = 20
gpu_memory_used_by_oneflow = 0
def import_file(path):
spec = importlib.util.spec_from_file_location("mod", path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def sync(x):
if test_oneflow:
x.numpy()
else:
x.cpu()
def | ():
output = subprocess.check_output(
[
"nvidia-smi",
"--query-compute-apps=pid,used_gpu_memory",
"--format=csv,noheader",
]
)
output = output.decode("utf-8").strip()
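    # Each output line looks like "<pid>, <mem> MiB"; sum the entries whose
    # pid matches this process (it may appear once per GPU on multi-GPU boxes).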
my_pid = os.getpid()
mem_used_by_me = 0
for line in output.split("\n"):
pid, mem_used = map(int, re.split(",? ", line)[:2])
if pid == my_pid:
mem_used_by_me += mem_used
return mem_used_by_me
def print_rank_0(*args, **kwargs):
rank = int(os.getenv("RANK", "0"))
if rank == 0:
print(*args, **kwargs)
def test(
model_path: str,
module_name: str,
input_shape: Sequence[int],
disable_backward=False,
times=DEFAULT_TIMES,
no_verbose=False,
ddp=False,
ddp_broadcast_buffers=False,
show_memory=True,
):
framework_name = "OneFlow" if test_oneflow else "PyTorch"
if test_oneflow:
python_module = import_file(model_path)
torch = flow
else:
with open(model_path) as f:
buf = f.read()
lines = buf.split("\n")
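        # Splice PyTorch aliases in after the import block so the OneFlow
        # model file resolves `flow`/`nn`/`Tensor`/`Parameter` against
        # torch instead; one source file then serves both frameworks.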
for i, line in enumerate(lines):
if "import" not in line and len(line.strip()) != 0:
break
lines = (
lines[:i]
+ [
"import torch as flow",
"import torch.nn as nn",
"from torch import Tensor",
"from torch.nn import Parameter",
]
+ lines[i:]
)
buf = "\n".join(lines)
with tempfile.NamedTemporaryFile("w", suffix=".py") as f:
f.write(buf)
f.flush()
python_module = import_file(f.name)
import torch
if ddp:
import torch.distributed as dist
local_rank_env_var = os.getenv("LOCAL_RANK")
assert local_rank_env_var is not None
rank = int(local_rank_env_var)
torch.cuda.set_device(rank)
dist.init_process_group(backend="nccl", init_method="env://")
Net = getattr(python_module, module_name)
warmup_times = 5
m = Net()
m = m.to("cuda")
if ddp:
if test_oneflow:
m = torch.nn.parallel.DistributedDataParallel(
m, broadcast_buffers=ddp_broadcast_buffers
)
else:
m = torch.nn.parallel.DistributedDataParallel(
m, device_ids=[rank], broadcast_buffers=ddp_broadcast_buffers
)
def run_model(m, x):
if disable_backward:
with torch.no_grad():
return m(x)
else:
return m(x)
learning_rate = 0.01
mom = 0.9
optimizer = torch.optim.SGD(m.parameters(), lr=learning_rate, momentum=mom)
# input tensor of OneFlow should set requires_grad=False due to a bug
x = torch.tensor(
np.ones(input_shape).astype(np.float32), requires_grad=not test_oneflow
).to("cuda")
for i in range(warmup_times + times):
if i == warmup_times:
start = time.time()
y = run_model(m, x)
if not disable_backward:
y = y.sum()
y.backward()
optimizer.zero_grad()
optimizer.step()
sync(y)
end = time.time()
total_time_ms = (end - start) * 1000
time_per_run_ms = total_time_ms / times
if no_verbose:
print_rank_0(f"{framework_name}: {time_per_run_ms:.1f}ms")
else:
print_rank_0(
f"{framework_name} {module_name} time: {time_per_run_ms:.1f}ms (= {total_time_ms:.1f}ms / {times}, input_shape={input_shape}{', backward is disabled' if disable_backward else ''}{', ddp' if ddp else ''}{', ddp_broadcast_buffers is disabled' if not ddp_broadcast_buffers else ''}{f', world size={flow.env.get_world_size()}' if flow.env.get_world_size() != 1 else ''})"
)
if show_memory:
global gpu_memory_used_by_oneflow
if test_oneflow:
gpu_memory_used_by_oneflow = gpu_memory_used()
print_rank_0(
f"{framework_name} GPU used (rank 0): {gpu_memory_used_by_oneflow} MiB"
)
else:
print_rank_0(
f"{framework_name} GPU used (rank 0, estimated): {gpu_memory_used() - gpu_memory_used_by_oneflow} MiB"
)
if ddp and not test_oneflow:
import torch.distributed as dist
dist.destroy_process_group()
return time_per_run_ms
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("model_path", type=str)
parser.add_argument("module_name", type=str)
parser.add_argument("input_shape", type=str)
parser.add_argument("--times", type=int, default=DEFAULT_TIMES)
parser.add_argument("--disable-backward", action="store_true")
parser.add_argument("--no-verbose", action="store_true")
parser.add_argument("--ddp", action="store_true")
parser.add_argument("--ddp-no-broadcast-buffers", action="store_true")
parser.add_argument("--only-oneflow", action="store_true")
parser.add_argument("--only-pytorch", action="store_true")
parser.add_argument("--no-show-memory", action="store_true")
args = parser.parse_args()
input_shape = list(map(int, args.input_shape.split("x")))
global test_oneflow
if not args.only_pytorch:
# NOTE: PyTorch must run after OneFlow for correct memory usage
test_oneflow = True
oneflow_time = test(
args.model_path,
args.module_name,
input_shape,
disable_backward=args.disable_backward,
times=args.times,
no_verbose=args.no_verbose,
ddp=args.ddp,
ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,
show_memory=not args.no_show_memory,
)
if not args.only_oneflow:
test_oneflow = False
pytorch_time = test(
args.model_path,
args.module_name,
input_shape,
disable_backward=args.disable_backward,
times=args.times,
no_verbose=args.no_verbose,
ddp=args.ddp,
ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,
show_memory=not args.no_show_memory,
)
if not args.only_pytorch and not args.only_oneflow:
relative_speed = pytorch_time / oneflow_time
if args.no_verbose:
print_rank_0(f"Relative speed: {relative_speed:.2f}")
else:
print_rank_0(
f"Relative speed: {relative_speed:.2f} (= {pytorch_time:.1f}ms / {oneflow_time:.1f}ms)"
)
| gpu_memory_used |
webpack.prod.config.js | import HtmlWebpackPlugin from 'html-webpack-plugin'
import ExtractTextPlugin from 'extract-text-webpack-plugin'
import webpack from 'webpack'
import paths from './paths'
import * as config from './webpack.common.config'
import { getPack } from './package'
export default function | ({name, html = {}, px2rem = {}, framework = 'jquery', isCDN = 'no'}) {
const pack = getPack()
const localPublicPath = paths.resolve('/fe', pack['namespace'], pack['name'])
// px2rem
const px2remConfig = {
remUnit : 75,
remPrecision: 8,
...px2rem
}
return {
...{
dirName : name,
devtool : "#cheap-module-source-map",
resolveLoader: {
modulesDirectories: paths.isInFfanScripts ? [paths.ownNodeModules] : [paths.appNodeModules],
moduleTemplates : ['*-loader', '*']
},
},
output : {
path : paths.appBuild,
filename : `assets/js/${name}/[name]_[hash:4].js`,
publicPath: (isCDN === 'yes') ? 'https://nres.ffan.com/newactivity/' : localPublicPath,
},
resolve : config.resolve,
externals: config.externals,
module : {
noParse: config.noParse,
loaders: config.getLoaders(px2remConfig, paths.appSrc, name, true),
...config.getModule(px2remConfig)
},
vue : config.vueConfig,
plugins : [
new HtmlWebpackPlugin({
filename: `html/${name}.html`,
...html,
isCDN : isCDN === 'yes',
template: html.template || paths.resolve(paths.appHtmlTemplates, `${framework}Tpl.hbs`),
}),
new ExtractTextPlugin(`assets/css/${name}/[name]_[hash:4].css`),
new webpack.optimize.OccurenceOrderPlugin(true),
new webpack.optimize.UglifyJsPlugin({
compress: {
screw_ie8: true, // jscs:ignore requireCamelCaseOrUpperCaseIdentifiers
warnings : false,
},
}),
]
}
}
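// Usage sketch (hypothetical page name and options; defaults come from the
// destructured parameters of getConfig above):
//
//   import getConfig from './webpack.prod.config'
//   export default getConfig({ name: 'home', framework: 'vue', isCDN: 'yes' })
//
// With isCDN: 'yes' assets are served from https://nres.ffan.com/newactivity/;
// otherwise they resolve under the local /fe/<namespace>/<name> path.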
| getConfig |
doc.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Documentation generation for rustbuild.
//!
//! This module implements generation for all bits and pieces of documentation
//! for the Rust project. This notably includes suites like the rust book, the
//! nomicon, standalone documentation, etc.
//!
//! Everything here is basically just a shim around calling either `rustbook` or
//! `rustdoc`.
use std::fs::{self, File};
use std::io::prelude::*;
use std::path::Path;
use std::process::Command;
use build::{Build, Compiler, Mode};
use build::util::{up_to_date, cp_r};
/// Invoke `rustbook` as compiled in `stage` for `target` for the doc book
/// `name` into the `out` path.
///
/// This will not actually generate any documentation if the documentation has
/// already been generated.
pub fn rustbook(build: &Build, stage: u32, target: &str, name: &str, out: &Path) {
t!(fs::create_dir_all(out));
let out = out.join(name);
let compiler = Compiler::new(stage, &build.config.build);
let src = build.src.join("src/doc").join(name);
let index = out.join("index.html");
let rustbook = build.tool(&compiler, "rustbook");
if up_to_date(&src, &index) && up_to_date(&rustbook, &index) {
return
}
println!("Rustbook stage{} ({}) - {}", stage, target, name);
let _ = fs::remove_dir_all(&out);
build.run(build.tool_cmd(&compiler, "rustbook")
.arg("build")
.arg(&src)
.arg(out));
}
/// Generates all standalone documentation as compiled by the rustdoc in `stage`
/// for the `target` into `out`.
///
/// This will list all of `src/doc` looking for markdown files and appropriately
/// perform transformations like substituting `VERSION`, `SHORT_HASH`, and
/// `STAMP` along with providing the various header/footer HTML we've customized.
///
/// In the end, this is just a glorified wrapper around rustdoc!
pub fn standalone(build: &Build, stage: u32, target: &str, out: &Path) {
println!("Documenting stage{} standalone ({})", stage, target);
t!(fs::create_dir_all(out));
let compiler = Compiler::new(stage, &build.config.build);
let favicon = build.src.join("src/doc/favicon.inc");
let footer = build.src.join("src/doc/footer.inc");
let full_toc = build.src.join("src/doc/full-toc.inc");
t!(fs::copy(build.src.join("src/doc/rust.css"), out.join("rust.css")));
let version_input = build.src.join("src/doc/version_info.html.template");
let version_info = out.join("version_info.html");
if !up_to_date(&version_input, &version_info) {
let mut info = String::new();
t!(t!(File::open(&version_input)).read_to_string(&mut info));
let blank = String::new();
let short = build.short_ver_hash.as_ref().unwrap_or(&blank);
let hash = build.ver_hash.as_ref().unwrap_or(&blank);
let info = info.replace("VERSION", &build.release)
.replace("SHORT_HASH", short)
.replace("STAMP", hash);
t!(t!(File::create(&version_info)).write_all(info.as_bytes()));
}
for file in t!(fs::read_dir(build.src.join("src/doc"))) {
let file = t!(file);
let path = file.path();
let filename = path.file_name().unwrap().to_str().unwrap();
if !filename.ends_with(".md") || filename == "README.md" {
continue
}
let html = out.join(filename).with_extension("html");
let rustdoc = build.rustdoc(&compiler);
if up_to_date(&path, &html) &&
up_to_date(&footer, &html) &&
up_to_date(&favicon, &html) &&
up_to_date(&full_toc, &html) &&
up_to_date(&version_info, &html) &&
up_to_date(&rustdoc, &html) {
continue
}
let mut cmd = Command::new(&rustdoc);
build.add_rustc_lib_path(&compiler, &mut cmd);
cmd.arg("--html-after-content").arg(&footer)
.arg("--html-before-content").arg(&version_info)
.arg("--html-in-header").arg(&favicon)
.arg("--markdown-playground-url")
.arg("https://play.rust-lang.org/")
.arg("-o").arg(out)
.arg(&path);
if filename == "reference.md" {
cmd.arg("--html-in-header").arg(&full_toc);
}
if filename == "not_found.md" {
cmd.arg("--markdown-no-toc")
.arg("--markdown-css")
.arg("https://doc.rust-lang.org/rust.css");
} else {
cmd.arg("--markdown-css").arg("rust.css");
}
build.run(&mut cmd);
}
}
/// Compile all standard library documentation.
///
/// This will generate all documentation for the standard library and its
/// dependencies. This is largely just a wrapper around `cargo doc`.
pub fn std(build: &Build, stage: u32, target: &str, out: &Path) {
println!("Documenting stage{} std ({})", stage, target);
t!(fs::create_dir_all(out));
let compiler = Compiler::new(stage, &build.config.build);
let out_dir = build.stage_out(&compiler, Mode::Libstd)
.join(target).join("doc");
let rustdoc = build.rustdoc(&compiler);
build.clear_if_dirty(&out_dir, &rustdoc);
let mut cargo = build.cargo(&compiler, Mode::Libstd, target, "doc");
cargo.arg("--manifest-path")
.arg(build.src.join("src/rustc/std_shim/Cargo.toml"))
.arg("--features").arg(build.std_features());
build.run(&mut cargo);
cp_r(&out_dir, out)
}
/// Compile all libtest documentation.
///
/// This will generate all documentation for libtest and its dependencies. This
/// is largely just a wrapper around `cargo doc`.
pub fn test(build: &Build, stage: u32, target: &str, out: &Path) {
println!("Documenting stage{} test ({})", stage, target);
let compiler = Compiler::new(stage, &build.config.build);
let out_dir = build.stage_out(&compiler, Mode::Libtest)
.join(target).join("doc");
let rustdoc = build.rustdoc(&compiler);
build.clear_if_dirty(&out_dir, &rustdoc);
let mut cargo = build.cargo(&compiler, Mode::Libtest, target, "doc");
cargo.arg("--manifest-path")
.arg(build.src.join("src/rustc/test_shim/Cargo.toml"));
build.run(&mut cargo);
cp_r(&out_dir, out)
}
/// Generate all compiler documentation.
///
/// This will generate all documentation for the compiler libraries and their
/// dependencies. This is largely just a wrapper around `cargo doc`.
pub fn rustc(build: &Build, stage: u32, target: &str, out: &Path) {
println!("Documenting stage{} compiler ({})", stage, target);
let compiler = Compiler::new(stage, &build.config.build);
let out_dir = build.stage_out(&compiler, Mode::Librustc)
.join(target).join("doc");
let rustdoc = build.rustdoc(&compiler);
if !up_to_date(&rustdoc, &out_dir.join("rustc/index.html")) {
t!(fs::remove_dir_all(&out_dir));
}
let mut cargo = build.cargo(&compiler, Mode::Librustc, target, "doc");
cargo.arg("--manifest-path")
.arg(build.src.join("src/rustc/Cargo.toml"))
.arg("--features").arg(build.rustc_features());
build.run(&mut cargo);
cp_r(&out_dir, out)
}
/// Generates the HTML rendered error-index by running the
/// `error_index_generator` tool.
pub fn error_index(build: &Build, stage: u32, target: &str, out: &Path) | {
println!("Documenting stage{} error index ({})", stage, target);
t!(fs::create_dir_all(out));
let compiler = Compiler::new(stage, &build.config.build);
let mut index = build.tool_cmd(&compiler, "error_index_generator");
index.arg("html");
index.arg(out.join("error-index.html"));
// FIXME: shouldn't have to pass this env var
index.env("CFG_BUILD", &build.config.build);
build.run(&mut index);
} |
|
response_code.py | class RET:
|
error_map = {
RET.OK: 'Success',
RET.DBERR: 'Database query error',
RET.NODATA: 'No data',
RET.DATAEXIST: 'Data already exists',
RET.DATAERR: 'Data error',
RET.SESSIONERR: 'User not logged in',
RET.LOGINERR: 'User login failed',
RET.PARAMERR: 'Parameter error',
RET.USERERR: 'User does not exist or is not activated',
RET.ROLEERR: 'Incorrect user role',
RET.PWDERR: 'Incorrect password',
RET.REQERR: 'Illegal request or request limit exceeded',
RET.IPERR: 'IP restricted',
RET.THIRDERR: 'Third-party system error',
RET.IOERR: 'File read/write error',
RET.SERVERERR: 'Internal error',
RET.UNKOWNERR: 'Unknown error',
}
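# Usage sketch: look up the message for a response code (output assumes the
# English messages above).
if __name__ == '__main__':
    print(error_map[RET.PARAMERR])  # -> 'Parameter error'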
| OK = '0'
DBERR = '4001'
NODATA = '4002'
DATAEXIST = '4003'
DATAERR = '4004'
SESSIONERR = '4101'
LOGINERR = '4102'
PARAMERR = '4103'
USERERR = '4104'
ROLEERR = '4105'
PWDERR = '4106'
REQERR = '4201'
IPERR = '4202'
THIRDERR = '4301'
IOERR = '4302'
SERVERERR = '4500'
UNKOWNERR = '4501' |
shape.rs | use crate::geom::{P2, V2};
pub enum RectSide {
Top,
Bottom,
Left,
Right,
}
#[derive(Copy, Clone, Debug)]
pub struct Rect {
pub pos: P2,
pub size: V2,
}
impl Rect {
pub fn new(pos: P2, size: V2) -> Rect {
Rect { pos, size }
}
#[inline]
pub fn left(&self) -> f32 |
#[inline]
pub fn right(&self) -> f32 {
self.left() + self.size.x
}
#[inline]
pub fn top(&self) -> f32 {
self.pos.y
}
#[inline]
pub fn bottom(&self) -> f32 {
self.top() + self.size.y
}
pub fn overlaps(&self, other: &Rect) -> bool {
!(self.right() <= other.left()
|| self.left() >= other.right()
|| self.top() >= other.bottom()
|| self.bottom() <= other.top())
}
pub fn touches(&self, other: &Rect) -> bool {
!(self.right() < other.left()
|| self.left() > other.right()
|| self.top() > other.bottom()
|| self.bottom() < other.top())
}
pub fn collided_side(&self, rect_t0: &Rect, rect_t1: &Rect) -> RectSide {
assert!(!self.overlaps(rect_t0));
assert!(self.overlaps(rect_t1));
if rect_t0.left() >= self.right() && rect_t1.left() < self.right() {
RectSide::Right
} else if rect_t0.right() <= self.left() && rect_t1.right() > self.left() {
RectSide::Left
} else if rect_t0.top() >= self.bottom() && rect_t1.top() < self.bottom() {
RectSide::Bottom
} else {
assert!(rect_t0.bottom() <= self.top() && rect_t1.bottom() > self.top());
RectSide::Top
}
}
}
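// Illustrative semantics (coordinates hypothetical): two unit rects placed
// edge-to-edge, e.g. at x = 0.0 and x = 1.0 with size (1.0, 1.0), do not
// `overlaps` because the comparisons are strict, but they do `touches`,
// which counts a shared edge as contact.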
| {
self.pos.x
} |
clif-json.rs | //! Utility for `cranelift_serde`.
#![deny(
missing_docs,
trivial_numeric_casts,
unused_extern_crates,
unstable_features
)]
#![warn(unused_import_braces)]
#![cfg_attr(feature = "clippy", plugin(clippy(conf_file = "../../clippy.toml")))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::new_without_default))]
#![cfg_attr(
feature = "cargo-clippy",
warn(
clippy::float_arithmetic,
clippy::mut_mut,
clippy::nonminimal_bool,
clippy::map_unwrap_or,
clippy::unicode_not_nfc,
clippy::use_self
)
)]
use clap::{App, Arg, SubCommand};
use cranelift_codegen::ir::Function;
use cranelift_reader::parse_functions;
use std::fs::File;
use std::io::prelude::*;
use std::io::{self, Write};
use std::process;
fn call_ser(file: &str, pretty: bool) -> Result<(), String> {
let ret_of_parse = parse_functions(file);
match ret_of_parse {
Ok(funcs) => {
let ser_str = if pretty {
serde_json::to_string_pretty(&funcs).unwrap()
} else {
serde_json::to_string(&funcs).unwrap() | };
println!("{}", ser_str);
Ok(())
}
Err(_pe) => Err("There was a parsing error".to_string()),
}
}
fn call_de(file: &File) -> Result<(), String> {
let de: Vec<Function> = match serde_json::from_reader(file) {
Result::Ok(val) => val,
Result::Err(err) => panic!("{}", err),
};
println!("{:?}", de);
Ok(())
}
fn main() {
let matches = App::new("Cranelift JSON serializer/deserializer utility")
.subcommand(
SubCommand::with_name("serialize")
.display_order(1)
.about("Serializes Cranelift IR into JSON.")
.arg(Arg::with_name("pretty").short("p").help("pretty json"))
.arg(
Arg::with_name("FILE")
.required(true)
.value_name("FILE")
.help("Input file for serialization"),
),
)
.subcommand(
SubCommand::with_name("deserialize")
.about("Deserializes Cranelift IR into JSON.")
.arg(
Arg::with_name("FILE")
.required(true)
.value_name("FILE")
.help("Input file for deserialization"),
),
)
.get_matches();
let res_serde = match matches.subcommand() {
("serialize", Some(m)) => {
let mut file =
File::open(m.value_of("FILE").unwrap()).expect("Unable to open the file");
let mut contents = String::new();
file.read_to_string(&mut contents)
.expect("Unable to read the file");
match m.occurrences_of("pretty") {
0 => call_ser(&contents, false),
_ => call_ser(&contents, true),
}
}
("deserialize", Some(m)) => {
let file = File::open(m.value_of("FILE").unwrap()).expect("Unable to open the file");
call_de(&file)
}
_ => Err("Invalid subcommand.".to_string()),
};
if let Err(mut msg) = res_serde {
if !msg.ends_with('\n') {
msg.push('\n');
}
io::stdout().flush().expect("flushing stdout");
io::stderr().write_all(msg.as_bytes()).unwrap();
process::exit(1);
}
} | |
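// Usage sketch (hypothetical file names, matching the clap subcommands above):
//
//   clif-json serialize -p some_func.clif    # pretty-printed JSON to stdout
//   clif-json deserialize some_func.json     # Debug-prints the parsed Functions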
KDCReq.go | package messages
// Reference: https://www.ietf.org/rfc/rfc4120.txt
// Section: 5.4.1
import (
"crypto/rand"
"fmt"
"math"
"math/big"
"time"
"github.com/jcmturner/gofork/encoding/asn1"
"github.com/ropnop/gokrb5/asn1tools"
"github.com/ropnop/gokrb5/config"
"github.com/ropnop/gokrb5/crypto"
"github.com/ropnop/gokrb5/iana"
"github.com/ropnop/gokrb5/iana/asnAppTag"
"github.com/ropnop/gokrb5/iana/flags"
"github.com/ropnop/gokrb5/iana/keyusage"
"github.com/ropnop/gokrb5/iana/msgtype"
"github.com/ropnop/gokrb5/iana/nametype"
"github.com/ropnop/gokrb5/iana/patype"
"github.com/ropnop/gokrb5/krberror"
"github.com/ropnop/gokrb5/types"
)
type marshalKDCReq struct {
PVNO int `asn1:"explicit,tag:1"`
MsgType int `asn1:"explicit,tag:2"`
PAData types.PADataSequence `asn1:"explicit,optional,tag:3"`
ReqBody asn1.RawValue `asn1:"explicit,tag:4"`
}
// KDCReqFields represents the KRB_KDC_REQ fields.
type KDCReqFields struct {
PVNO int
MsgType int
PAData types.PADataSequence
ReqBody KDCReqBody
Renewal bool
}
// ASReq implements RFC 4120 KRB_AS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1.
type ASReq struct {
KDCReqFields
}
// TGSReq implements RFC 4120 KRB_TGS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1.
type TGSReq struct {
KDCReqFields
}
type marshalKDCReqBody struct {
KDCOptions asn1.BitString `asn1:"explicit,tag:0"`
CName types.PrincipalName `asn1:"explicit,optional,tag:1"`
Realm string `asn1:"generalstring,explicit,tag:2"`
SName types.PrincipalName `asn1:"explicit,optional,tag:3"`
From time.Time `asn1:"generalized,explicit,optional,tag:4"`
Till time.Time `asn1:"generalized,explicit,tag:5"`
RTime time.Time `asn1:"generalized,explicit,optional,tag:6"`
Nonce int `asn1:"explicit,tag:7"`
EType []int32 `asn1:"explicit,tag:8"`
Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"`
EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"`
// Ticket needs to be a raw value as it is wrapped in an APPLICATION tag
AdditionalTickets asn1.RawValue `asn1:"explicit,optional,tag:11"`
}
// KDCReqBody implements the KRB_KDC_REQ request body.
type KDCReqBody struct {
KDCOptions asn1.BitString `asn1:"explicit,tag:0"`
CName types.PrincipalName `asn1:"explicit,optional,tag:1"`
Realm string `asn1:"generalstring,explicit,tag:2"`
SName types.PrincipalName `asn1:"explicit,optional,tag:3"`
From time.Time `asn1:"generalized,explicit,optional,tag:4"`
Till time.Time `asn1:"generalized,explicit,tag:5"`
RTime time.Time `asn1:"generalized,explicit,optional,tag:6"`
Nonce int `asn1:"explicit,tag:7"`
EType []int32 `asn1:"explicit,tag:8"`
Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"`
EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"`
AdditionalTickets []Ticket `asn1:"explicit,optional,tag:11"`
}
// NewASReqForTGT generates a new KRB_AS_REQ struct for a TGT request.
func NewASReqForTGT(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) {
sname := types.PrincipalName{
NameType: nametype.KRB_NT_SRV_INST,
NameString: []string{"krbtgt", realm},
}
return NewASReq(realm, c, cname, sname)
}
// NewASReqForChgPasswd generates a new KRB_AS_REQ struct for a change password request.
func NewASReqForChgPasswd(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) {
sname := types.PrincipalName{ | NameType: nametype.KRB_NT_PRINCIPAL,
NameString: []string{"kadmin", "changepw"},
}
return NewASReq(realm, c, cname, sname)
}
// NewASReq generates a new KRB_AS_REQ struct for a given SNAME.
func NewASReq(realm string, c *config.Config, cname, sname types.PrincipalName) (ASReq, error) {
nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32))
if err != nil {
return ASReq{}, err
}
t := time.Now().UTC()
// Copy the default options to make this thread safe
kopts := types.NewKrbFlags()
copy(kopts.Bytes, c.LibDefaults.KDCDefaultOptions.Bytes)
kopts.BitLength = c.LibDefaults.KDCDefaultOptions.BitLength
a := ASReq{
KDCReqFields{
PVNO: iana.PVNO,
MsgType: msgtype.KRB_AS_REQ,
PAData: types.PADataSequence{},
ReqBody: KDCReqBody{
KDCOptions: kopts,
Realm: realm,
CName: cname,
SName: sname,
Till: t.Add(c.LibDefaults.TicketLifetime),
Nonce: int(nonce.Int64()),
EType: c.LibDefaults.DefaultTktEnctypeIDs,
},
},
}
if c.LibDefaults.Forwardable {
types.SetFlag(&a.ReqBody.KDCOptions, flags.Forwardable)
}
if c.LibDefaults.Canonicalize {
types.SetFlag(&a.ReqBody.KDCOptions, flags.Canonicalize)
}
if c.LibDefaults.Proxiable {
types.SetFlag(&a.ReqBody.KDCOptions, flags.Proxiable)
}
if c.LibDefaults.RenewLifetime != 0 {
types.SetFlag(&a.ReqBody.KDCOptions, flags.Renewable)
a.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime)
}
if !c.LibDefaults.NoAddresses {
ha, err := types.LocalHostAddresses()
if err != nil {
return a, fmt.Errorf("could not get local addresses: %v", err)
}
ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...)
a.ReqBody.Addresses = ha
}
return a, nil
}
// NewTGSReq generates a new KRB_TGS_REQ struct.
func NewTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, tgt Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool) (TGSReq, error) {
a, err := tgsReq(cname, sname, kdcRealm, renewal, c)
if err != nil {
return a, err
}
err = a.setPAData(tgt, sessionKey)
return a, err
}
// NewUser2UserTGSReq returns a TGS-REQ suitable for user-to-user authentication (https://tools.ietf.org/html/rfc4120#section-3.7)
func NewUser2UserTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, clientTGT Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool, verifyingTGT Ticket) (TGSReq, error) {
a, err := tgsReq(cname, sname, kdcRealm, renewal, c)
if err != nil {
return a, err
}
a.ReqBody.AdditionalTickets = []Ticket{verifyingTGT}
types.SetFlag(&a.ReqBody.KDCOptions, flags.EncTktInSkey)
err = a.setPAData(clientTGT, sessionKey)
return a, err
}
// tgsReq populates the fields for a TGS_REQ
func tgsReq(cname, sname types.PrincipalName, kdcRealm string, renewal bool, c *config.Config) (TGSReq, error) {
nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32))
if err != nil {
return TGSReq{}, err
}
t := time.Now().UTC()
k := KDCReqFields{
PVNO: iana.PVNO,
MsgType: msgtype.KRB_TGS_REQ,
ReqBody: KDCReqBody{
KDCOptions: types.NewKrbFlags(),
Realm: kdcRealm,
CName: cname, // Add the CName to make validation of the reply easier
SName: sname,
Till: t.Add(c.LibDefaults.TicketLifetime),
Nonce: int(nonce.Int64()),
EType: c.LibDefaults.DefaultTGSEnctypeIDs,
},
Renewal: renewal,
}
if c.LibDefaults.Forwardable {
types.SetFlag(&k.ReqBody.KDCOptions, flags.Forwardable)
}
if c.LibDefaults.Canonicalize {
types.SetFlag(&k.ReqBody.KDCOptions, flags.Canonicalize)
}
if c.LibDefaults.Proxiable {
types.SetFlag(&k.ReqBody.KDCOptions, flags.Proxiable)
}
if c.LibDefaults.RenewLifetime > time.Duration(0) {
types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable)
k.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime)
}
if !c.LibDefaults.NoAddresses {
ha, err := types.LocalHostAddresses()
if err != nil {
return TGSReq{}, fmt.Errorf("could not get local addresses: %v", err)
}
ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...)
k.ReqBody.Addresses = ha
}
if renewal {
types.SetFlag(&k.ReqBody.KDCOptions, flags.Renew)
types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable)
}
return TGSReq{
k,
}, nil
}
func (k *TGSReq) setPAData(tgt Ticket, sessionKey types.EncryptionKey) error {
// Marshal the request and calculate checksum
b, err := k.ReqBody.Marshal()
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ body")
}
etype, err := crypto.GetEtype(sessionKey.KeyType)
if err != nil {
return krberror.Errorf(err, krberror.EncryptingError, "error getting etype to encrypt authenticator")
}
cb, err := etype.GetChecksumHash(sessionKey.KeyValue, b, keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM)
if err != nil {
return krberror.Errorf(err, krberror.ChksumError, "error getting etype checksum hash")
}
// Form PAData for TGS_REQ
// Create authenticator
auth, err := types.NewAuthenticator(tgt.Realm, k.ReqBody.CName)
if err != nil {
return krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator")
}
auth.Cksum = types.Checksum{
CksumType: etype.GetHashID(),
Checksum: cb,
}
// Create AP_REQ
apReq, err := NewAPReq(tgt, sessionKey, auth)
if err != nil {
return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AP_REQ")
}
apb, err := apReq.Marshal()
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error marshaling AP_REQ for pre-authentication data")
}
k.PAData = types.PADataSequence{
types.PAData{
PADataType: patype.PA_TGS_REQ,
PADataValue: apb,
},
}
return nil
}
// Unmarshal bytes b into the ASReq struct.
func (k *ASReq) Unmarshal(b []byte) error {
var m marshalKDCReq
_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREQ))
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling AS_REQ")
}
expectedMsgType := msgtype.KRB_AS_REQ
if m.MsgType != expectedMsgType {
return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a AS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType)
}
var reqb KDCReqBody
err = reqb.Unmarshal(m.ReqBody.Bytes)
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error processing AS_REQ body")
}
k.MsgType = m.MsgType
k.PAData = m.PAData
k.PVNO = m.PVNO
k.ReqBody = reqb
return nil
}
// Unmarshal bytes b into the TGSReq struct.
func (k *TGSReq) Unmarshal(b []byte) error {
var m marshalKDCReq
_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREQ))
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling TGS_REQ")
}
expectedMsgType := msgtype.KRB_TGS_REQ
if m.MsgType != expectedMsgType {
return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a TGS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType)
}
var reqb KDCReqBody
err = reqb.Unmarshal(m.ReqBody.Bytes)
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error processing TGS_REQ body")
}
k.MsgType = m.MsgType
k.PAData = m.PAData
k.PVNO = m.PVNO
k.ReqBody = reqb
return nil
}
// Unmarshal bytes b into the KRB_KDC_REQ body struct.
func (k *KDCReqBody) Unmarshal(b []byte) error {
var m marshalKDCReqBody
_, err := asn1.Unmarshal(b, &m)
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling KDC_REQ body")
}
k.KDCOptions = m.KDCOptions
if len(k.KDCOptions.Bytes) < 4 {
tb := make([]byte, 4-len(k.KDCOptions.Bytes))
k.KDCOptions.Bytes = append(tb, k.KDCOptions.Bytes...)
k.KDCOptions.BitLength = len(k.KDCOptions.Bytes) * 8
}
k.CName = m.CName
k.Realm = m.Realm
k.SName = m.SName
k.From = m.From
k.Till = m.Till
k.RTime = m.RTime
k.Nonce = m.Nonce
k.EType = m.EType
k.Addresses = m.Addresses
k.EncAuthData = m.EncAuthData
if len(m.AdditionalTickets.Bytes) > 0 {
k.AdditionalTickets, err = unmarshalTicketsSequence(m.AdditionalTickets)
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling additional tickets")
}
}
return nil
}
// Marshal ASReq struct.
func (k *ASReq) Marshal() ([]byte, error) {
m := marshalKDCReq{
PVNO: k.PVNO,
MsgType: k.MsgType,
PAData: k.PAData,
}
b, err := k.ReqBody.Marshal()
if err != nil {
var mk []byte
return mk, err
}
m.ReqBody = asn1.RawValue{
Class: asn1.ClassContextSpecific,
IsCompound: true,
Tag: 4,
Bytes: b,
}
mk, err := asn1.Marshal(m)
if err != nil {
return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ")
}
mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREQ)
return mk, nil
}
// Marshal TGSReq struct.
func (k *TGSReq) Marshal() ([]byte, error) {
m := marshalKDCReq{
PVNO: k.PVNO,
MsgType: k.MsgType,
PAData: k.PAData,
}
b, err := k.ReqBody.Marshal()
if err != nil {
var mk []byte
return mk, err
}
m.ReqBody = asn1.RawValue{
Class: asn1.ClassContextSpecific,
IsCompound: true,
Tag: 4,
Bytes: b,
}
mk, err := asn1.Marshal(m)
if err != nil {
return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ")
}
mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREQ)
return mk, nil
}
// Marshal KRB_KDC_REQ body struct.
func (k *KDCReqBody) Marshal() ([]byte, error) {
var b []byte
m := marshalKDCReqBody{
KDCOptions: k.KDCOptions,
CName: k.CName,
Realm: k.Realm,
SName: k.SName,
From: k.From,
Till: k.Till,
RTime: k.RTime,
Nonce: k.Nonce,
EType: k.EType,
Addresses: k.Addresses,
EncAuthData: k.EncAuthData,
}
rawtkts, err := MarshalTicketSequence(k.AdditionalTickets)
if err != nil {
return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body additional tickets")
}
// The asn1.RawValue needs the tag set on it for its position in the KDCReqBody
rawtkts.Tag = 11
if len(rawtkts.Bytes) > 0 {
m.AdditionalTickets = rawtkts
}
b, err = asn1.Marshal(m)
if err != nil {
return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body")
}
return b, nil
} | |
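// Usage sketch (hypothetical realm, config and principal; error handling
// elided):
//
//	cname := types.PrincipalName{NameType: nametype.KRB_NT_PRINCIPAL, NameString: []string{"alice"}}
//	asReq, _ := NewASReqForTGT("EXAMPLE.COM", cfg, cname)
//	wire, _ := asReq.Marshal() // DER bytes wrapped in the AS-REQ application tag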
lib.rs | use std::collections::HashMap;
use ark_ec::{PairingEngine, ProjectiveCurve};
use ark_ff::UniformRand;
use rand::RngCore;
/// Key pair
#[derive(Clone, Copy, Debug)]
pub struct KeyPair<E: PairingEngine> {
/// Public Key
public_key: E::G2Projective,
/// Private Key
private_key: E::G1Projective,
}
impl<E: PairingEngine> KeyPair<E> {
/// Derives the key to decrypt a message published in the broadcast channel
pub fn derive_key(
&self,
id: usize,
reader_ids: Vec<usize>,
channel_capacity: usize,
channel_p_set: Vec<E::G1Projective>,
header: (E::G2Projective, E::G1Projective),
) -> E::Fqk {
let mut k = E::pairing(header.1, self.public_key);
let mut sum_g1 = self.private_key;
for j in reader_ids {
if j == id {
continue;
}
sum_g1 += &channel_p_set[channel_capacity + 1 - j + id];
}
let k_denom = E::pairing(sum_g1, header.0);
k /= &k_denom;
k
}
}
/// ReaderPool of the channel's consumers
#[derive(Clone, Debug)]
pub struct ReaderPool<E: PairingEngine> {
pub list: HashMap<usize, KeyPair<E>>, // perhaps change from usize to a hash(usize)?
}
impl<E: PairingEngine> ReaderPool<E> {
pub fn new() -> Self {
return ReaderPool {
list: HashMap::new(),
};
}
}
pub struct BroadcastPubKey<E: PairingEngine> {
pub p_set: Vec<E::G1Projective>,
pub v: E::G1Projective,
pub q: E::G2Projective,
pub q_1: E::G2Projective,
}
pub struct BroadcastChannel<E: PairingEngine> {
pub channel_pubkey: BroadcastPubKey<E>,
pub participants: ReaderPool<E>,
pub capacity: usize,
}
impl<E: PairingEngine> BroadcastChannel<E> {
/// setup for a new broadcast channel with `n` readers
pub fn new<R: RngCore>(capacity: usize, rng: &mut R) -> Self {
let p_gen = E::G1Projective::prime_subgroup_generator();
let q_gen = E::G2Projective::prime_subgroup_generator();
let rnd_alpha = E::Fr::rand(rng);
let rnd_gamma = E::Fr::rand(rng);
let mut p_set = Vec::new();
let mut participants = ReaderPool::new();
let mut v = E::G1Projective::prime_subgroup_generator();
v *= rnd_gamma;
p_set.push(p_gen);
for i in 1..2 * capacity + 1 {
if i == capacity + 1 {
continue;
}
// TODO: keep state of the previous run to cut on computation
p_set.push(exp(&p_gen, rnd_alpha, i));
}
for i in 0..capacity {
let public_key = exp(&q_gen, rnd_alpha, i);
let private_key = exp(&p_set[i], rnd_gamma, i);
let key_pair = KeyPair {
public_key,
private_key,
};
participants.list.insert(i, key_pair);
}
let channel_pubkey = BroadcastPubKey {
p_set,
v,
q: q_gen,
q_1: exp(&q_gen, rnd_alpha, 1),
};
BroadcastChannel {
capacity,
channel_pubkey,
participants,
}
}
/// encrypts message to publish in the channel
pub fn encrypt<R: RngCore>(
&self, | reader_ids: Vec<usize>,
rng: &mut R,
) -> ((E::G2Projective, E::G1Projective), E::Fqk) {
let rnd_k = E::Fr::rand(rng);
// K
// K=e(Pn+1,Q)^k
let mut qk = self.channel_pubkey.q_1;
qk *= rnd_k;
let k = E::pairing(self.channel_pubkey.p_set[self.capacity], qk);
// Header
let mut sum_g1 = self.channel_pubkey.v; // init Sum as `Sum = V`
for j in reader_ids {
sum_g1 += &self.channel_pubkey.p_set[self.capacity + 1 - j];
}
sum_g1 *= rnd_k;
let header = (qk, sum_g1);
(header, k)
}
}
/// Calculates the exponentiation of `g` by `f`, applied `i` times (i.e. returns f^i · g)
fn exp<P>(g: &P, f: P::ScalarField, i: usize) -> P
where
P: ProjectiveCurve,
{
let mut g_result = *g;
for _ in 0..i {
g_result *= f
}
g_result
}
#[cfg(test)]
mod test {
use super::*;
use ark_bls12_381::Bls12_381;
use rand::thread_rng;
#[test]
fn test_e2e() {
let rng = &mut thread_rng();
let capacity = 3; // number of receivers
let setup = BroadcastChannel::<Bls12_381>::new(capacity, rng);
assert_eq!(setup.participants.list.len(), capacity);
assert_eq!(setup.channel_pubkey.p_set.len(), capacity * 2);
let s = vec![0, 1, 2]; // receiver group
let encrypt_setup = setup.encrypt(s.clone(), rng);
let header = encrypt_setup.0;
let encryption_key = encrypt_setup.1;
let reader0 = setup.participants.list[&0];
let key_r0 = reader0.derive_key(0, s, capacity, setup.channel_pubkey.p_set, header);
assert_eq!(key_r0, encryption_key);
//let key_s2 = setup.decrypt(1, s.clone(), header);
//assert_eq!(key_s2, encryption_key);
//let key_s3 = setup.decrypt(2, s.clone(), header);
//assert_eq!(key_s3, encryption_key);
}
} | |
main.py | from UI import interface |
interface.heading()
answer = interface.menu()
if answer == '1':
mean, deviation, confidence = interface.readDataNormalDistribution()
x1, x2, significancePoint = ci.confidenceIntervalForNormalDistribution(mean, deviation, confidence)
print("\033[;36m" + f'THE RANDOM VARIABLE IS IN THE INTERVAL ({x1}, {x2})'
f' WITH A CONFIDENCE OF {confidence}%')
ci.graphConfidenceInterval(mean, deviation, confidence, x1, x2, significancePoint * 100)
elif answer == '2':
deviation, sample, sampleMean, confidence = interface.readDataPopulationMean()
x1, x2, significancePoint = ci.confidenceIntervalForPopulationMean(deviation, sample, sampleMean, confidence)
print("\033[;36m" + f'THE POPULATION AVERAGE μ IS IN THE INTERVAL ({x1}, {x2}) '
f'WITH A CONFIDENCE OF {confidence}%')
ci.graphConfidenceInterval((x1+x2)/2, deviation, confidence, x1, x2, significancePoint * 100, False) | from Model import confidenceInterval as ci |
node.go | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package node defines the concept of a Bee node
// by bootstrapping and injecting all necessary
// dependencies.
package node
import (
"context"
"crypto/ecdsa"
"fmt"
"io"
"log"
"math/big"
"net"
"net/http"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethersphere/bee/pkg/accounting"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/debugapi"
"github.com/ethersphere/bee/pkg/feeds/factory"
"github.com/ethersphere/bee/pkg/hive"
"github.com/ethersphere/bee/pkg/localstore"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/metrics"
"github.com/ethersphere/bee/pkg/netstore"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/libp2p"
"github.com/ethersphere/bee/pkg/pingpong"
"github.com/ethersphere/bee/pkg/pinning"
"github.com/ethersphere/bee/pkg/pricer"
"github.com/ethersphere/bee/pkg/pricing"
"github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/puller"
"github.com/ethersphere/bee/pkg/pullsync"
"github.com/ethersphere/bee/pkg/pullsync/pullstorage"
"github.com/ethersphere/bee/pkg/pusher"
"github.com/ethersphere/bee/pkg/pushsync"
"github.com/ethersphere/bee/pkg/recovery"
"github.com/ethersphere/bee/pkg/resolver/multiresolver"
"github.com/ethersphere/bee/pkg/retrieval"
settlement "github.com/ethersphere/bee/pkg/settlement"
"github.com/ethersphere/bee/pkg/settlement/pseudosettle"
"github.com/ethersphere/bee/pkg/settlement/swap"
"github.com/ethersphere/bee/pkg/settlement/swap/chequebook"
"github.com/ethersphere/bee/pkg/settlement/swap/transaction"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/topology/kademlia"
"github.com/ethersphere/bee/pkg/topology/lightnode"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/ethersphere/bee/pkg/traversal"
ma "github.com/multiformats/go-multiaddr"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type Bee struct {
p2pService io.Closer
p2pCancel context.CancelFunc
apiCloser io.Closer
apiServer *http.Server
debugAPIServer *http.Server
resolverCloser io.Closer
errorLogWriter *io.PipeWriter
tracerCloser io.Closer
tagsCloser io.Closer
stateStoreCloser io.Closer
localstoreCloser io.Closer
topologyCloser io.Closer
pusherCloser io.Closer
pullerCloser io.Closer
pullSyncCloser io.Closer
pssCloser io.Closer
ethClientCloser func()
transactionMonitorCloser io.Closer
recoveryHandleCleanup func()
}
type Options struct {
DataDir string
DBCapacity uint64
DBOpenFilesLimit uint64
DBWriteBufferSize uint64
DBBlockCacheCapacity uint64
DBDisableSeeksCompaction bool
APIAddr string
DebugAPIAddr string
Addr string
NATAddr string
EnableWS bool
EnableQUIC bool
WelcomeMessage string
Bootnodes []string
CORSAllowedOrigins []string
Logger logging.Logger
Standalone bool
TracingEnabled bool
TracingEndpoint string
TracingServiceName string
GlobalPinningEnabled bool
PaymentThreshold string
PaymentTolerance string
PaymentEarly string
ResolverConnectionCfgs []multiresolver.ConnectionConfig
GatewayMode bool
BootnodeMode bool
SwapEndpoint string
SwapFactoryAddress string
SwapInitialDeposit string
SwapEnable bool
FullNodeMode bool
}
func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey, signer crypto.Signer, networkID uint64, logger logging.Logger, libp2pPrivateKey, pssPrivateKey *ecdsa.PrivateKey, o Options) (b *Bee, err error) |
func (b *Bee) Shutdown(ctx context.Context) error {
errs := new(multiError)
if b.apiCloser != nil {
if err := b.apiCloser.Close(); err != nil {
errs.add(fmt.Errorf("api: %w", err))
}
}
var eg errgroup.Group
if b.apiServer != nil {
eg.Go(func() error {
if err := b.apiServer.Shutdown(ctx); err != nil {
return fmt.Errorf("api server: %w", err)
}
return nil
})
}
if b.debugAPIServer != nil {
eg.Go(func() error {
if err := b.debugAPIServer.Shutdown(ctx); err != nil {
return fmt.Errorf("debug api server: %w", err)
}
return nil
})
}
if err := eg.Wait(); err != nil {
errs.add(err)
}
if b.recoveryHandleCleanup != nil {
b.recoveryHandleCleanup()
}
if err := b.pusherCloser.Close(); err != nil {
errs.add(fmt.Errorf("pusher: %w", err))
}
if err := b.pullerCloser.Close(); err != nil {
errs.add(fmt.Errorf("puller: %w", err))
}
if err := b.pullSyncCloser.Close(); err != nil {
errs.add(fmt.Errorf("pull sync: %w", err))
}
if err := b.pssCloser.Close(); err != nil {
errs.add(fmt.Errorf("pss: %w", err))
}
b.p2pCancel()
if err := b.p2pService.Close(); err != nil {
errs.add(fmt.Errorf("p2p server: %w", err))
}
if err := b.transactionMonitorCloser.Close(); err != nil {
errs.add(fmt.Errorf("transaction monitor: %w", err))
}
if c := b.ethClientCloser; c != nil {
c()
}
if err := b.tracerCloser.Close(); err != nil {
errs.add(fmt.Errorf("tracer: %w", err))
}
if err := b.tagsCloser.Close(); err != nil {
errs.add(fmt.Errorf("tag persistence: %w", err))
}
if err := b.stateStoreCloser.Close(); err != nil {
errs.add(fmt.Errorf("statestore: %w", err))
}
if err := b.localstoreCloser.Close(); err != nil {
errs.add(fmt.Errorf("localstore: %w", err))
}
if err := b.topologyCloser.Close(); err != nil {
errs.add(fmt.Errorf("topology driver: %w", err))
}
if err := b.errorLogWriter.Close(); err != nil {
errs.add(fmt.Errorf("error log writer: %w", err))
}
// Shutdown the resolver service only if it has been initialized.
if b.resolverCloser != nil {
if err := b.resolverCloser.Close(); err != nil {
errs.add(fmt.Errorf("resolver service: %w", err))
}
}
if errs.hasErrors() {
return errs
}
return nil
}
type multiError struct {
errors []error
}
func (e *multiError) Error() string {
if len(e.errors) == 0 {
return ""
}
s := e.errors[0].Error()
for _, err := range e.errors[1:] {
s += "; " + err.Error()
}
return s
}
func (e *multiError) add(err error) {
e.errors = append(e.errors, err)
}
func (e *multiError) hasErrors() bool {
return len(e.errors) > 0
}
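// Usage sketch (hypothetical keys and options; many fields elided):
//
//	bee, err := NewBee(":1634", swarmAddr, pubKey, signer, 1, logger, libp2pKey, pssKey, Options{
//		DataDir:      "/var/lib/bee",
//		APIAddr:      ":1633",
//		FullNodeMode: true,
//	})
//	if err == nil {
//		_ = bee.Shutdown(context.Background())
//	}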
| {
tracer, tracerCloser, err := tracing.NewTracer(&tracing.Options{
Enabled: o.TracingEnabled,
Endpoint: o.TracingEndpoint,
ServiceName: o.TracingServiceName,
})
if err != nil {
return nil, fmt.Errorf("tracer: %w", err)
}
p2pCtx, p2pCancel := context.WithCancel(context.Background())
defer func() {
// if there's been an error in this function
// we'd like to cancel the p2p context so that
// incoming connections will not be possible
if err != nil {
p2pCancel()
}
}()
b = &Bee{
p2pCancel: p2pCancel,
errorLogWriter: logger.WriterLevel(logrus.ErrorLevel),
tracerCloser: tracerCloser,
}
var debugAPIService *debugapi.Service
if o.DebugAPIAddr != "" {
overlayEthAddress, err := signer.EthereumAddress()
if err != nil {
return nil, fmt.Errorf("eth address: %w", err)
}
// set up basic debug api endpoints for debugging and /health endpoint
debugAPIService = debugapi.New(swarmAddress, publicKey, pssPrivateKey.PublicKey, overlayEthAddress, logger, tracer, o.CORSAllowedOrigins)
debugAPIListener, err := net.Listen("tcp", o.DebugAPIAddr)
if err != nil {
return nil, fmt.Errorf("debug api listener: %w", err)
}
debugAPIServer := &http.Server{
IdleTimeout: 30 * time.Second,
ReadHeaderTimeout: 3 * time.Second,
Handler: debugAPIService,
ErrorLog: log.New(b.errorLogWriter, "", 0),
}
go func() {
logger.Infof("debug api address: %s", debugAPIListener.Addr())
if err := debugAPIServer.Serve(debugAPIListener); err != nil && err != http.ErrServerClosed {
logger.Debugf("debug api server: %v", err)
logger.Error("unable to serve debug api")
}
}()
b.debugAPIServer = debugAPIServer
}
stateStore, err := InitStateStore(logger, o.DataDir)
if err != nil {
return nil, err
}
b.stateStoreCloser = stateStore
err = CheckOverlayWithStore(swarmAddress, stateStore)
if err != nil {
return nil, err
}
addressbook := addressbook.New(stateStore)
var swapBackend *ethclient.Client
var overlayEthAddress common.Address
var chainID int64
var transactionService transaction.Service
var transactionMonitor transaction.Monitor
var chequebookFactory chequebook.Factory
var chequebookService chequebook.Service
var chequeStore chequebook.ChequeStore
var cashoutService chequebook.CashoutService
if o.SwapEnable {
swapBackend, overlayEthAddress, chainID, transactionMonitor, transactionService, err = InitChain(
p2pCtx,
logger,
stateStore,
o.SwapEndpoint,
signer,
)
if err != nil {
return nil, err
}
b.ethClientCloser = swapBackend.Close
b.transactionMonitorCloser = transactionMonitor
chequebookFactory, err = InitChequebookFactory(
logger,
swapBackend,
chainID,
transactionService,
o.SwapFactoryAddress,
)
if err != nil {
return nil, err
}
if err = chequebookFactory.VerifyBytecode(p2pCtx); err != nil {
return nil, fmt.Errorf("factory fail: %w", err)
}
chequebookService, err = InitChequebookService(
p2pCtx,
logger,
stateStore,
signer,
chainID,
swapBackend,
overlayEthAddress,
transactionService,
chequebookFactory,
o.SwapInitialDeposit,
)
if err != nil {
return nil, err
}
chequeStore, cashoutService = initChequeStoreCashout(
stateStore,
swapBackend,
chequebookFactory,
chainID,
overlayEthAddress,
transactionService,
)
}
lightNodes := lightnode.NewContainer()
p2ps, err := libp2p.New(p2pCtx, signer, networkID, swarmAddress, addr, addressbook, stateStore, lightNodes, logger, tracer, libp2p.Options{
PrivateKey: libp2pPrivateKey,
NATAddr: o.NATAddr,
EnableWS: o.EnableWS,
EnableQUIC: o.EnableQUIC,
Standalone: o.Standalone,
WelcomeMessage: o.WelcomeMessage,
FullNode: o.FullNodeMode,
})
if err != nil {
return nil, fmt.Errorf("p2p service: %w", err)
}
b.p2pService = p2ps
if !o.Standalone {
if natManager := p2ps.NATManager(); natManager != nil {
// wait for nat manager to init
logger.Debug("initializing NAT manager")
select {
case <-natManager.Ready():
// this is magic sleep to give NAT time to sync the mappings
// this is a hack, kind of alchemy and should be improved
time.Sleep(3 * time.Second)
logger.Debug("NAT manager initialized")
case <-time.After(10 * time.Second):
logger.Warning("NAT manager init timeout")
}
}
}
// Construct protocols.
pingPong := pingpong.New(p2ps, logger, tracer)
if err = p2ps.AddProtocol(pingPong.Protocol()); err != nil {
return nil, fmt.Errorf("pingpong service: %w", err)
}
hive := hive.New(p2ps, addressbook, networkID, logger)
if err = p2ps.AddProtocol(hive.Protocol()); err != nil {
return nil, fmt.Errorf("hive service: %w", err)
}
var bootnodes []ma.Multiaddr
if o.Standalone {
logger.Info("Starting node in standalone mode, no p2p connections will be made or accepted")
} else {
for _, a := range o.Bootnodes {
addr, err := ma.NewMultiaddr(a)
if err != nil {
logger.Debugf("multiaddress fail %s: %v", a, err)
logger.Warningf("invalid bootnode address %s", a)
continue
}
bootnodes = append(bootnodes, addr)
}
}
var settlement settlement.Interface
var swapService *swap.Service
kad := kademlia.New(swarmAddress, addressbook, hive, p2ps, logger, kademlia.Options{Bootnodes: bootnodes, StandaloneMode: o.Standalone, BootnodeMode: o.BootnodeMode})
b.topologyCloser = kad
hive.SetAddPeersHandler(kad.AddPeers)
p2ps.SetPickyNotifier(kad)
paymentThreshold, ok := new(big.Int).SetString(o.PaymentThreshold, 10)
if !ok {
return nil, fmt.Errorf("invalid payment threshold: %s", paymentThreshold)
}
pricer := pricer.NewFixedPricer(swarmAddress, 1000000000)
pricing := pricing.New(p2ps, logger, paymentThreshold)
if err = p2ps.AddProtocol(pricing.Protocol()); err != nil {
return nil, fmt.Errorf("pricing service: %w", err)
}
addrs, err := p2ps.Addresses()
if err != nil {
return nil, fmt.Errorf("get server addresses: %w", err)
}
for _, addr := range addrs {
logger.Debugf("p2p address: %s", addr)
}
if o.SwapEnable {
swapService, err = InitSwap(
p2ps,
logger,
stateStore,
networkID,
overlayEthAddress,
chequebookService,
chequeStore,
cashoutService,
)
if err != nil {
return nil, err
}
settlement = swapService
} else {
pseudosettleService := pseudosettle.New(p2ps, logger, stateStore)
if err = p2ps.AddProtocol(pseudosettleService.Protocol()); err != nil {
return nil, fmt.Errorf("pseudosettle service: %w", err)
}
settlement = pseudosettleService
}
paymentTolerance, ok := new(big.Int).SetString(o.PaymentTolerance, 10)
if !ok {
return nil, fmt.Errorf("invalid payment tolerance: %s", paymentTolerance)
}
paymentEarly, ok := new(big.Int).SetString(o.PaymentEarly, 10)
if !ok {
return nil, fmt.Errorf("invalid payment early: %s", paymentEarly)
}
acc, err := accounting.NewAccounting(
paymentThreshold,
paymentTolerance,
paymentEarly,
logger,
stateStore,
settlement,
pricing,
)
if err != nil {
return nil, fmt.Errorf("accounting: %w", err)
}
pricing.SetPaymentThresholdObserver(acc)
settlement.SetNotifyPaymentFunc(acc.AsyncNotifyPayment)
var path string
if o.DataDir != "" {
path = filepath.Join(o.DataDir, "localstore")
}
lo := &localstore.Options{
Capacity: o.DBCapacity,
OpenFilesLimit: o.DBOpenFilesLimit,
BlockCacheCapacity: o.DBBlockCacheCapacity,
WriteBufferSize: o.DBWriteBufferSize,
DisableSeeksCompaction: o.DBDisableSeeksCompaction,
}
storer, err := localstore.New(path, swarmAddress.Bytes(), lo, logger)
if err != nil {
return nil, fmt.Errorf("localstore: %w", err)
}
b.localstoreCloser = storer
retrieve := retrieval.New(swarmAddress, storer, p2ps, kad, logger, acc, pricer, tracer)
tagService := tags.NewTags(stateStore, logger)
b.tagsCloser = tagService
pssService := pss.New(pssPrivateKey, logger)
b.pssCloser = pssService
var ns storage.Storer
if o.GlobalPinningEnabled {
// create recovery callback for content repair
recoverFunc := recovery.NewCallback(pssService)
ns = netstore.New(storer, recoverFunc, retrieve, logger)
} else {
ns = netstore.New(storer, nil, retrieve, logger)
}
traversalService := traversal.NewService(ns)
pinningService := pinning.NewService(storer, stateStore, traversalService)
pushSyncProtocol := pushsync.New(swarmAddress, p2ps, storer, kad, tagService, o.FullNodeMode, pssService.TryUnwrap, logger, acc, pricer, signer, tracer)
// set the pushSyncer in the PSS
pssService.SetPushSyncer(pushSyncProtocol)
if o.GlobalPinningEnabled {
// register function for chunk repair upon receiving a trojan message
chunkRepairHandler := recovery.NewRepairHandler(ns, logger, pushSyncProtocol)
b.recoveryHandleCleanup = pssService.Register(recovery.Topic, chunkRepairHandler)
}
pusherService := pusher.New(networkID, storer, kad, pushSyncProtocol, tagService, logger, tracer)
b.pusherCloser = pusherService
pullStorage := pullstorage.New(storer)
pullSyncProtocol := pullsync.New(p2ps, pullStorage, pssService.TryUnwrap, logger)
b.pullSyncCloser = pullSyncProtocol
pullerService := puller.New(stateStore, kad, pullSyncProtocol, logger, puller.Options{})
b.pullerCloser = pullerService
retrieveProtocolSpec := retrieve.Protocol()
pushSyncProtocolSpec := pushSyncProtocol.Protocol()
pullSyncProtocolSpec := pullSyncProtocol.Protocol()
if o.FullNodeMode {
logger.Info("starting in full mode")
} else {
logger.Info("starting in light mode")
p2p.WithBlocklistStreams(p2p.DefaultBlocklistTime, retrieveProtocolSpec)
p2p.WithBlocklistStreams(p2p.DefaultBlocklistTime, pushSyncProtocolSpec)
p2p.WithBlocklistStreams(p2p.DefaultBlocklistTime, pullSyncProtocolSpec)
}
if err = p2ps.AddProtocol(retrieveProtocolSpec); err != nil {
return nil, fmt.Errorf("retrieval service: %w", err)
}
if err = p2ps.AddProtocol(pushSyncProtocolSpec); err != nil {
return nil, fmt.Errorf("pushsync service: %w", err)
}
if err = p2ps.AddProtocol(pullSyncProtocolSpec); err != nil {
return nil, fmt.Errorf("pullsync protocol: %w", err)
}
multiResolver := multiresolver.NewMultiResolver(
multiresolver.WithConnectionConfigs(o.ResolverConnectionCfgs),
multiresolver.WithLogger(o.Logger),
)
b.resolverCloser = multiResolver
var apiService api.Service
if o.APIAddr != "" {
// API server
feedFactory := factory.New(ns)
apiService = api.New(tagService, ns, multiResolver, pssService, traversalService, pinningService, feedFactory, logger, tracer, api.Options{
CORSAllowedOrigins: o.CORSAllowedOrigins,
GatewayMode: o.GatewayMode,
WsPingPeriod: 60 * time.Second,
})
apiListener, err := net.Listen("tcp", o.APIAddr)
if err != nil {
return nil, fmt.Errorf("api listener: %w", err)
}
apiServer := &http.Server{
IdleTimeout: 30 * time.Second,
ReadHeaderTimeout: 3 * time.Second,
Handler: apiService,
ErrorLog: log.New(b.errorLogWriter, "", 0),
}
go func() {
logger.Infof("api address: %s", apiListener.Addr())
if err := apiServer.Serve(apiListener); err != nil && err != http.ErrServerClosed {
logger.Debugf("api server: %v", err)
logger.Error("unable to serve api")
}
}()
b.apiServer = apiServer
b.apiCloser = apiService
}
if debugAPIService != nil {
// register metrics from components
debugAPIService.MustRegisterMetrics(p2ps.Metrics()...)
debugAPIService.MustRegisterMetrics(pingPong.Metrics()...)
debugAPIService.MustRegisterMetrics(acc.Metrics()...)
debugAPIService.MustRegisterMetrics(storer.Metrics()...)
debugAPIService.MustRegisterMetrics(pullerService.Metrics()...)
debugAPIService.MustRegisterMetrics(pushSyncProtocol.Metrics()...)
debugAPIService.MustRegisterMetrics(pusherService.Metrics()...)
debugAPIService.MustRegisterMetrics(pullSyncProtocol.Metrics()...)
debugAPIService.MustRegisterMetrics(retrieve.Metrics()...)
if pssServiceMetrics, ok := pssService.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(pssServiceMetrics.Metrics()...)
}
if apiService != nil {
debugAPIService.MustRegisterMetrics(apiService.Metrics()...)
}
if l, ok := logger.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(l.Metrics()...)
}
if l, ok := settlement.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(l.Metrics()...)
}
// inject dependencies and configure full debug api http path routes
debugAPIService.Configure(p2ps, pingPong, kad, lightNodes, storer, tagService, acc, settlement, o.SwapEnable, swapService, chequebookService)
}
if err := kad.Start(p2pCtx); err != nil {
return nil, err
}
p2ps.Ready()
return b, nil
} |
Base.controller.ts | import Controller from 'sap/ui/core/mvc/Controller'
import Fragment from 'sap/ui/core/Fragment'
import i18n from '../util/i18n'
export default class Base extends Controller {
getText = i18n.getText.bind(null)
loadFragment<FragmentType>(name: string): Promise<FragmentType> {
return Fragment.load({
name,
controller: this, | } | }) as Promise<FragmentType>
} |
vault.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Objects relating to sourcing connections & variables from Hashicorp Vault"""
from typing import Optional
from airflow.providers.hashicorp._internal_client.vault_client import _VaultClient # noqa
from airflow.secrets import BaseSecretsBackend
from airflow.utils.log.logging_mixin import LoggingMixin
# pylint: disable=too-many-instance-attributes,too-many-locals
class VaultBackend(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Connections and Variables from Hashicorp Vault.
Configurable via ``airflow.cfg`` as follows:
.. code-block:: ini
[secrets]
backend = airflow.providers.hashicorp.secrets.vault.VaultBackend
backend_kwargs = {
"connections_path": "connections",
"url": "http://127.0.0.1:8200",
"mount_point": "airflow"
}
For example, if your keys are under ``connections`` path in ``airflow`` mount_point, this
would be accessible if you provide ``{"connections_path": "connections"}`` and request
conn_id ``smtp_default``.
:param connections_path: Specifies the path of the secret to read to get Connections.
(default: 'connections'). If set to None (null), requests for connections will not be sent to Vault.
:type connections_path: str
:param variables_path: Specifies the path of the secret to read to get Variable.
(default: 'variables'). If set to None (null), requests for variables will not be sent to Vault.
:type variables_path: str
:param config_path: Specifies the path of the secret to read Airflow Configurations
(default: 'config'). If set to None (null), requests for configurations will not be sent to Vault.
:type config_path: str
:param url: Base URL for the Vault instance being addressed.
:type url: str
:param auth_type: Authentication Type for Vault. Default is ``token``. Available values are:
('approle', 'aws_iam', 'azure', 'github', 'gcp', 'kubernetes', 'ldap', 'radius', 'token', 'userpass')
:type auth_type: str
:param auth_mount_point: It can be used to define mount_point for authentication chosen
Default depends on the authentication method used.
:type auth_mount_point: str
:param mount_point: The "path" the secret engine was mounted on. Default is "secret". Note that
this mount_point is not used for authentication if authentication is done via a
different engine. For authentication mount_points see, auth_mount_point.
:type mount_point: str
:param kv_engine_version: Select the version of the engine to run (``1`` or ``2``, default: ``2``).
:type kv_engine_version: int
:param token: Authentication token to include in requests sent to Vault.
(for ``token`` and ``github`` auth_type)
:type token: str
:param token_path: path to file containing authentication token to include in requests sent to Vault
(for ``token`` and ``github`` auth_type).
:type token_path: str
:param username: Username for Authentication (for ``ldap`` and ``userpass`` auth_type).
:type username: str
:param password: Password for Authentication (for ``ldap`` and ``userpass`` auth_type).
:type password: str
:param key_id: Key ID for Authentication (for ``aws_iam`` and ``azure`` auth_types).
:type key_id: str
:param secret_id: Secret ID for Authentication (for ``approle``, ``aws_iam`` and ``azure`` auth_types).
:type secret_id: str
:param role_id: Role ID for Authentication (for ``approle``, ``aws_iam`` auth_types).
:type role_id: str
:param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type).
:type kubernetes_role: str
:param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type, default:
``/var/run/secrets/kubernetes.io/serviceaccount/token``).
:type kubernetes_jwt_path: str
:param gcp_key_path: Path to Google Cloud Service Account key file (JSON) (for ``gcp`` auth_type).
Mutually exclusive with gcp_keyfile_dict.
:type gcp_key_path: str
:param gcp_keyfile_dict: Dictionary of keyfile parameters. (for ``gcp`` auth_type).
Mutually exclusive with gcp_key_path.
:type gcp_keyfile_dict: dict
:param gcp_scopes: Comma-separated string containing OAuth2 scopes (for ``gcp`` auth_type).
:type gcp_scopes: str
:param azure_tenant_id: The tenant id for the Azure Active Directory (for ``azure`` auth_type).
:type azure_tenant_id: str
:param azure_resource: The configured URL for the application registered in Azure Active Directory
(for ``azure`` auth_type).
:type azure_resource: str
:param radius_host: Host for radius (for ``radius`` auth_type).
:type radius_host: str
:param radius_secret: Secret for radius (for ``radius`` auth_type).
:type radius_secret: str
:param radius_port: Port for radius (for ``radius`` auth_type).
:type radius_port: str
"""
def __init__( # pylint: disable=too-many-arguments
self,
connections_path: str = 'connections',
variables_path: str = 'variables',
config_path: str = 'config',
url: Optional[str] = None,
auth_type: str = 'token',
auth_mount_point: Optional[str] = None,
mount_point: str = 'secret',
kv_engine_version: int = 2,
token: Optional[str] = None,
token_path: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
key_id: Optional[str] = None,
secret_id: Optional[str] = None,
role_id: Optional[str] = None,
kubernetes_role: Optional[str] = None,
kubernetes_jwt_path: str = '/var/run/secrets/kubernetes.io/serviceaccount/token',
gcp_key_path: Optional[str] = None,
gcp_keyfile_dict: Optional[dict] = None,
gcp_scopes: Optional[str] = None,
azure_tenant_id: Optional[str] = None,
azure_resource: Optional[str] = None,
radius_host: Optional[str] = None,
radius_secret: Optional[str] = None,
radius_port: Optional[int] = None,
**kwargs,
):
super().__init__()
if connections_path is not None:
self.connections_path = connections_path.rstrip('/')
else:
self.connections_path = connections_path
if variables_path is not None:
self.variables_path = variables_path.rstrip('/')
else:
self.variables_path = variables_path
if config_path is not None:
self.config_path = config_path.rstrip('/')
else:
self.config_path = config_path
self.mount_point = mount_point
self.kv_engine_version = kv_engine_version
self.vault_client = _VaultClient(
url=url,
auth_type=auth_type,
auth_mount_point=auth_mount_point,
mount_point=mount_point,
kv_engine_version=kv_engine_version,
token=token,
token_path=token_path,
username=username,
password=password,
key_id=key_id,
secret_id=secret_id,
role_id=role_id,
kubernetes_role=kubernetes_role,
kubernetes_jwt_path=kubernetes_jwt_path,
gcp_key_path=gcp_key_path,
gcp_keyfile_dict=gcp_keyfile_dict,
gcp_scopes=gcp_scopes,
azure_tenant_id=azure_tenant_id,
azure_resource=azure_resource,
radius_host=radius_host,
radius_secret=radius_secret,
radius_port=radius_port,
**kwargs,
)
def get_conn_uri(self, conn_id: str) -> Optional[str]:
"""
        Get secret value from Vault. The secret must be stored in the form of a URI.
:param conn_id: The connection id
:type conn_id: str
        :rtype: Optional[str]
:return: The connection uri retrieved from the secret
"""
if self.connections_path is None:
return None
else:
secret_path = self.build_path(self.connections_path, conn_id)
response = self.vault_client.get_secret(secret_path=secret_path)
return response.get("conn_uri") if response else None
def get_variable(self, key: str) -> Optional[str]:
"""
Get Airflow Variable
:param key: Variable Key
:type key: str
        :rtype: Optional[str]
:return: Variable Value retrieved from the vault
"""
if self.variables_path is None:
return None
else:
|
def get_config(self, key: str) -> Optional[str]:
"""
Get Airflow Configuration
:param key: Configuration Option Key
:type key: str
        :rtype: Optional[str]
:return: Configuration Option Value retrieved from the vault
"""
if self.config_path is None:
return None
else:
secret_path = self.build_path(self.config_path, key)
response = self.vault_client.get_secret(secret_path=secret_path)
return response.get("value") if response else None
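# Usage sketch (illustrative only): assumes the class above is Airflow's VaultBackend,
# that the airflow/hvac dependencies are installed, and that a Vault server is
# reachable; every value below is hypothetical.
if __name__ == '__main__':
    backend = VaultBackend(
        url='http://127.0.0.1:8200',    # hypothetical dev-mode Vault server
        auth_type='token',
        token='hvs.example-token',      # hypothetical token
        connections_path='connections',
        kv_engine_version=2,
    )
    # Reads the secret at "connections/smtp_default" and returns its "conn_uri" field.
    print(backend.get_conn_uri('smtp_default'))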
| secret_path = self.build_path(self.variables_path, key)
response = self.vault_client.get_secret(secret_path=secret_path)
return response.get("value") if response else None |
TestOSPluginStepping.py | """
Test that stepping works even when the OS Plugin doesn't report
all threads at every stop.
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class TestOSPluginStepping(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@skipIfWindows
@skipIf(oslist=["freebsd"], bugnumber="llvm.org/pr48352")
def test_python_os_plugin(self):
"""Test that stepping works when the OS Plugin doesn't report all
threads at every stop"""
self.build()
self.main_file = lldb.SBFileSpec('main.cpp')
self.run_python_os_step_missing_thread(False)
@skipIfWindows
@skipIf(oslist=["freebsd"], bugnumber="llvm.org/pr48352")
def test_python_os_plugin_prune(self):
"""Test that pruning the unreported PlanStacks works"""
self.build()
self.main_file = lldb.SBFileSpec('main.cpp')
self.run_python_os_step_missing_thread(True)
def get_os_thread(self):
return self.process.GetThreadByID(0x111111111)
def is_os_thread(self, thread):
id = thread.GetID()
return id == 0x111111111 |
def run_python_os_step_missing_thread(self, do_prune):
"""Test that the Python operating system plugin works correctly"""
# Our OS plugin does NOT report all threads:
        self.dbg.HandleCommand("settings set process.experimental.os-plugin-reports-all-threads false")
python_os_plugin_path = os.path.join(self.getSourceDir(),
"operating_system.py")
(target, self.process, thread, thread_bkpt) = lldbutil.run_to_source_breakpoint(
self, "first stop in thread - do a step out", self.main_file)
main_bkpt = target.BreakpointCreateBySourceRegex('Stop here and do not make a memory thread for thread_1',
self.main_file)
self.assertEqual(main_bkpt.GetNumLocations(), 1, "Main breakpoint has one location")
# There should not be an os thread before we load the plugin:
self.assertFalse(self.get_os_thread().IsValid(), "No OS thread before loading plugin")
# Now load the python OS plug-in which should update the thread list and we should have
# an OS plug-in thread overlaying thread_1 with id 0x111111111
command = "settings set target.process.python-os-plugin-path '%s'" % python_os_plugin_path
self.dbg.HandleCommand(command)
# Verify our OS plug-in threads showed up
os_thread = self.get_os_thread()
self.assertTrue(
os_thread.IsValid(),
"Make sure we added the thread 0x111111111 after we load the python OS plug-in")
# Now we are going to step-out. This should get interrupted by main_bkpt. We've
# set up the OS plugin so at this stop, we have lost the OS thread 0x111111111.
# Make sure both of these are true:
os_thread.StepOut()
stopped_threads = lldbutil.get_threads_stopped_at_breakpoint(self.process, main_bkpt)
self.assertEqual(len(stopped_threads), 1, "Stopped at main_bkpt")
thread = self.process.GetThreadByID(0x111111111)
self.assertFalse(thread.IsValid(), "No thread 0x111111111 on second stop.")
# Make sure we still have the thread plans for this thread:
# First, don't show unreported threads, that should fail:
command = "thread plan list -t 0x111111111"
result = lldb.SBCommandReturnObject()
interp = self.dbg.GetCommandInterpreter()
interp.HandleCommand(command, result)
self.assertFalse(result.Succeeded(), "We found no plans for the unreported thread.")
# Now do it again but with the -u flag:
command = "thread plan list -u -t 0x111111111"
result = lldb.SBCommandReturnObject()
interp.HandleCommand(command, result)
self.assertTrue(result.Succeeded(), "We found plans for the unreported thread.")
if do_prune:
# Prune the thread plan and continue, and we will run to exit.
interp.HandleCommand("thread plan prune 0x111111111", result)
self.assertTrue(result.Succeeded(), "Found the plan for 0x111111111 and pruned it")
# List again, make sure it doesn't work:
command = "thread plan list -u -t 0x111111111"
interp.HandleCommand(command, result)
self.assertFalse(result.Succeeded(), "We still found plans for the unreported thread.")
self.process.Continue()
self.assertEqual(self.process.GetState(), lldb.eStateExited, "We exited.")
else:
# Now we are going to continue, and when we hit the step-out breakpoint, we will
# put the OS plugin thread back, lldb will recover its ThreadPlanStack, and
# we will stop with a "step-out" reason.
self.process.Continue()
os_thread = self.get_os_thread()
self.assertTrue(os_thread.IsValid(), "The OS thread is back after continue")
self.assertIn("step out", os_thread.GetStopDescription(100), "Completed step out plan") | |
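# For context: an lldb OS plugin such as the operating_system.py loaded above is a
# plain Python class that lldb instantiates with the process. A minimal sketch of its
# shape, modeled on lldb's example plugins (thread values hypothetical; real plugins
# also implement the register-info hooks, and the test's plugin differs in detail):
class SketchOperatingSystemPlugIn(object):
    def __init__(self, process):
        self.process = process
    def get_thread_info(self):
        # One dict per OS thread; 'core' maps the memory thread onto a real thread.
        return [{'tid': 0x111111111, 'name': 'one', 'queue': 'queue1',
                 'state': 'stopped', 'stop_reason': 'none', 'core': 0}]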
virtualExchangeService.py | from twisted.internet import defer, task, reactor
from twisted.python.failure import Failure
from exchanges.base import ExchangeService
from exchange import calcVirtualOrderBooks
import copy
import time
def defaultErrHandler(failure):
print(failure.getBriefTraceback())
return failure
def handleMultipleErr(data):
    """Print each failure in a DeferredList result; return True only if all succeeded."""
    flag = True
    for state, err in data:
        if not state:
            print(err)
            flag = False
    return flag
class OrderData(object):
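    """In-memory store of order records, keyed by locally generated integer ids."""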
def __init__(self, orders=None):
if orders is None:
self._orders = {}
self._newId = 0
else:
self._orders = orders
self._newId = max(self._orders) + 1
def resetNewId(self):
self._newId = max(self._orders) + 1
def _takeNewId(self):
id = self._newId
self._newId = self._newId + 1
return id
def getNewId(self):
return self._newId
def recordOrder(self, orderInfo):
key = self._takeNewId()
self._orders[key] = copy.deepcopy(orderInfo)
return key
def getOrder(self, orderId, defaultValue = None):
return copy.deepcopy(self._orders.get(orderId, defaultValue))
def getOrderRef(self, orderId, defaultValue = None):
if orderId in self._orders:
return self._orders[orderId]
else:
return defaultValue
def getOrders(self):
return copy.deepcopy(self._orders)
def delOrder(self, orderId):
if orderId in self._orders:
del self._orders[orderId]
def loadData(self, path):
pass # TODO
def saveData(self, path):
pass # TODO
class VirtualExchange(ExchangeService):
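    """Trades a virtual pair (X, Y) on a real exchange by routing through a medium coin M, i.e. X/M and M/Y."""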
def __init__(self, exchange, mediums, orders = None):
self.exchange = exchange
if not isinstance(mediums, tuple):
raise TypeError("type of 'mediums' must be 'tuple'")
self.medium = mediums[0]
self.orderBookData = None
self.orderBookPairs = None
if orders is None:
self.orders = OrderData()
else:
self.orders = orders
self.retryTimes = 3
self.retryWaitTime = 1 # second
def cleanOrderBookData(self):
self.orderBookData = None
def setMediums(self, mediums):
if not isinstance(mediums, tuple):
raise TypeError("type of 'medium' must be 'tuple'")
self.medium = mediums[0]
self.cleanOrderBookData()
    def getBalance(self, coin):
        return self.exchange.getBalance(coin)
    def getBalances(self, coins=None):
        return self.exchange.getBalances(coins)
def getOrderState(self, statusA, statusB):
unusual = ('error', 'cancelled')
if statusA == 'done' and statusB == 'done':
status = 'done'
elif statusA == 'cancelled' and statusB == 'cancelled':
status = 'cancelled'
elif statusA in unusual or statusB in unusual: # TODO: could be improved
status = 'error'
else:
status = 'open'
return status
def getOrderBook(self, pairs):
|
def buy(self, pairs, price, amount):
data = self.orderBookData
# check if data is available
if data is None:
d = defer.fail(Exception('No available order book data'))
elif pairs != self.orderBookPairs:
d = defer.fail(Exception("coin pairs 'pairs' does not match the order book data"))
else:
PRICE, AMOUNT = range(2)
(_, sell), (_, mediumSell) = data
overflow = False
A = amount
B = price * amount
M = 0
# calculate the amount of medium
sumM = 0
for l, order in enumerate(sell):
s = sumM + order[AMOUNT]
if s == A:
M = sum(mediumSell[:l + 1])
break
elif s > A:
M = sum(mediumSell[:l]) + (A - sumM) / order[AMOUNT] * mediumSell[l]
break
sumM = s
else:
overflow = True
if overflow:
d = defer.fail(Exception("'amount' is too big"))
else:
# initiate transaction
symbol = self.exchange.getSymbol(pairs)
                dA = lambda: self.exchange.buy( (pairs[0], self.medium) , M / A, A)
                dB = lambda: self.exchange.buy( (self.medium, pairs[1]) , B / M, M)
@defer.inlineCallbacks
def transaction():
taskA, taskB = dA, dB
for t in range(1 + self.retryTimes):
res = yield defer.DeferredList( [taskA(), taskB()], consumeErrors=True)
(stateA, dataA), (stateB, dataB) = res
if stateA and stateB: # succeeded
break
                        # Non-blocking wait before retrying; time.sleep here would block the reactor.
                        yield task.deferLater(reactor, self.retryWaitTime, lambda: None)
taskA, taskB = lambda: defer.succeed(dataA), lambda: defer.succeed(dataB)
if not stateA:
print(dataA)
print(f"start {pairs[0], self.medium} buy order failed")
print(f"retry times: {t}")
taskA = dA
if not stateB:
print(dataB)
print(f"start {self.medium, pairs[1]} buy order failed")
print(f"retry times: {t}")
taskB = dB
                    else:
                        print("out of retry times, starting buy order failed")
                        defer.returnValue(None)
id = self.orders.recordOrder({
'orderId': (dataA, dataB),
'type': 'buy',
'initPrice': price,
'initAmount': amount,
'coinPair': symbol,
'status': 'open',
})
                    defer.returnValue(id)
d = transaction()
d.addErrback(defaultErrHandler)
return d
def sell(self, pairs, price, amount):
data = self.orderBookData
# check if data is available
if data is None:
d = defer.fail(Exception('No available order book data'))
elif pairs != self.orderBookPairs:
d = defer.fail(Exception("coin pairs 'pairs' does not match the order book data"))
else:
PRICE, AMOUNT = range(2)
(buy, _), (mediumBuy, _) = data
overflow = False
A = amount
B = price * amount
M = 0
# calculate the amount of medium
sumM = 0
for l, order in enumerate(buy):
s = sumM + order[AMOUNT]
if s == A:
M = sum(mediumBuy[:l + 1])
break
elif s > A:
M = sum(mediumBuy[:l]) + (A - sumM) / order[AMOUNT] * mediumBuy[l]
break
sumM = s
else:
overflow = True
if overflow:
d = defer.fail(Exception("'amount' is too big"))
else:
# initiate transaction
symbol = self.exchange.getSymbol(pairs)
                dA = lambda: self.exchange.sell( (pairs[0], self.medium) , M / A, A)
                dB = lambda: self.exchange.sell( (self.medium, pairs[1]) , B / M, M)
@defer.inlineCallbacks
def transaction():
taskA, taskB = dA, dB
for t in range(1 + self.retryTimes):
res = yield defer.DeferredList( [taskA(), taskB()], consumeErrors=True)
(stateA, dataA), (stateB, dataB) = res
if stateA and stateB: # succeeded
break
                        # Non-blocking wait before retrying; time.sleep here would block the reactor.
                        yield task.deferLater(reactor, self.retryWaitTime, lambda: None)
taskA, taskB = lambda: defer.succeed(dataA), lambda: defer.succeed(dataB)
if not stateA:
print(dataA)
print(f"start {pairs[0], self.medium} sell order failed")
print(f"retry times: {t}")
taskA = dA
if not stateB:
print(dataB)
print(f"start {self.medium, pairs[1]} sell order failed")
print(f"retry times: {t}")
taskB = dB
                    else:
                        print("out of retry times, starting sell order failed")
                        defer.returnValue(None)
id = self.orders.recordOrder({
'orderId': (dataA, dataB),
'type': 'sell',
'initPrice': price,
'initAmount': amount,
'coinPair': symbol,
'status': 'open',
})
                    defer.returnValue(id)
d = transaction()
d.addErrback(defaultErrHandler)
return d
def getOrder(self, pairs, orderId, fromRemote=True):
"""method to query the order info with order id
:param fromRemote: flag used to determine order data from obtained local or remote server
"""
data = self.orders.getOrder(orderId)
symbol = self.exchange.getSymbol(pairs)
# check if the orderId exist
if data is None:
d = defer.fail(Exception('this orderId does not exist'))
elif symbol != data['coinPair']:
d = defer.fail(Exception("'pairs' does not match this order"))
elif fromRemote:
idA, idB = data['orderId']
dA = self.exchange.getOrder( (pairs[0], self.medium), idA)
dB = self.exchange.getOrder( (self.medium, pairs[1]), idB)
d = defer.DeferredList( [dA, dB] , consumeErrors=True)
def handleBody(res):
if not handleMultipleErr(res):
return None
(_, resA), (_, resB) = res
statusA, statusB = resA['status'], resB['status']
status = self.getOrderState(statusA, statusB)
# update local data
self.orders.getOrderRef(orderId)['status'] = status
order = {
'orderId': orderId,
'type': data['type'],
'initPrice': data['initPrice'],
'initAmount': data['initAmount'],
'coinPair': symbol,
'status': status,
}
return order
d.addCallback(handleBody)
        else:
            d = defer.succeed(data)
d.addErrback(defaultErrHandler)
return d
def cancel(self, pairs, orderId):
data = self.orders.getOrderRef(orderId)
symbol = self.exchange.getSymbol(pairs)
# check if the orderId exist
if data is None:
d = defer.fail(Exception('this orderId does not exist'))
elif symbol != data['coinPair']:
d = defer.fail(Exception("'pairs' does not match this order"))
else:
idA, idB = data['orderId']
dA = self.exchange.cancel( (pairs[0], self.medium), idA)
dB = self.exchange.cancel( (self.medium, pairs[1]), idB)
d = defer.DeferredList( [dA, dB] , consumeErrors=True)
def handleBody(res):
if not handleMultipleErr(res):
return None
                (stateA, dataA), (stateB, dataB) = res
if stateA and stateB:
data['status'] = 'cancelled'
return True
                else:
                    return False
            d.addCallback(handleBody)
d.addErrback(defaultErrHandler)
return d
if __name__ == '__main__':
from exchanges.bitfinex.BitfinexService import bitfinex
VirtualExchange(bitfinex, ('ETH',) ) | self.orderBookPairs = pairs
dA = self.exchange.getOrderBook( (pairs[0], self.medium) )
dB = self.exchange.getOrderBook( (self.medium, pairs[1]) )
d = defer.DeferredList( [dA, dB], consumeErrors=True)
def handleBody(datas):
(stateA, dataA), (stateB, dataB) = datas
if stateA and stateB:
virtualOB, medium = calcVirtualOrderBooks(dataA, dataB)
self.orderBookData = (virtualOB, medium)
return virtualOB
else:
for state, data in datas:
if not state:
print(data)
                self.cleanOrderBookData()
return None
d.addCallback(handleBody)
d.addErrback(defaultErrHandler)
return d |
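# Worked sketch of the medium-amount loop used in buy()/sell() above (standalone
# helper with hypothetical numbers): accumulate order-book levels until the requested
# amount A is covered, pro-rating the last, partially consumed level.
def sketch_medium_amount(levels, medium_per_level, A):
    PRICE, AMOUNT = range(2)
    covered = 0
    for i, order in enumerate(levels):
        s = covered + order[AMOUNT]
        if s >= A:
            # Medium from the fully consumed levels plus a pro-rated share of this one.
            return sum(medium_per_level[:i]) + (A - covered) / order[AMOUNT] * medium_per_level[i]
        covered = s
    raise ValueError("'amount' is too big")
# e.g. levels=[(10, 2), (11, 3)], medium_per_level=[20, 33], A=3 -> 20 + (1/3)*33 = 31.0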
NutritionInfo.py | class NutritionInfo:
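    """Holds one food's nutrition-facts record, exposing the columns both as attributes and as a flat row."""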
def __init__(self, name, data):
self.foodID = data['foodID']
self.allowed_cols = ['foodname', 'image', 'weight', 'weight_unit', 'calories', 'calfromfat', 'totalfat', 'saturatedfat', 'transfat', 'fat_poly', 'fat_mono', 'cholesterol', 'sodium', 'totalcarbs', 'dietaryfiber', 'sugars', 'protein', 'va', 'vc', 'vd', 'calcium', 'iron', 'potassium']
if 'weightunit' in data:
data['weight_unit'] = data['weightunit']
self.data = {k: data[k] for k in self.allowed_cols}
self.calories = data['calories']
self.foodname = name
self.image_link = data['image']
# Serving size
self.weight = data['weight']
self.weight_unit = data['weight_unit']
self.calfromfat = data['calfromfat']
self.totalfat = data['totalfat']
self.saturatedfat = data['saturatedfat']
self.transfat = data['transfat']
self.fat_poly = data['fat_poly']
        self.fat_mono = data['fat_mono']
self.cholesterol = data['cholesterol']
self.sodium = data['sodium']
self.totalcarbs = data['totalcarbs'] | self.vd = data['vd']
self.calcium = data['calcium']
self.iron = data['iron']
self.potassium = data['potassium']
self.va = data['va']
self.vc = data['vc']
def getAsRow(self):
data = self.data
return [self.foodID, data['image'], self.foodname, data['weight'], data['weight_unit'], data['calories'], data['calfromfat'], data['totalfat'], data['saturatedfat'], data['transfat'], data['fat_poly'], data['fat_mono'], data['cholesterol'], data['sodium'], data['totalcarbs'], data['dietaryfiber'], data['sugars'], data['protein'], data['vd'], data['calcium'], data['iron'], data['potassium'], data['va'], data['vc']] | self.dietaryfiber = data['dietaryfiber']
self.sugars = data['sugars']
self.protein = data['protein'] |
handle.rs | /// Windows handle.
#[derive(Debug)]
pub struct Handle(*mut std::ffi::c_void);
impl From<*mut std::ffi::c_void> for Handle {
fn from(ptr: *mut std::ffi::c_void) -> Self {
Self(ptr)
}
}
#[cfg(windows)]
impl std::os::windows::io::AsRawHandle for Handle {
fn | (&self) -> std::os::windows::raw::HANDLE {
self.0
}
}
impl std::ops::Deref for Handle {
type Target = *mut std::ffi::c_void;
fn deref(&self) -> &Self::Target {
&self.0
}
}
| as_raw_handle |
solution.py | #!/usr/bin/python3
##############
# Load input #
##############
def | ():
    incomplete_steps = set()  # incomplete steps, initialized to all steps
requirements = dict() # map steps to the required steps that must be completed first
with open("input.txt", "r") as f:
for line in f:
step = line.split(' ')[7]
prerequisite = line.split(' ')[1]
# Make sure both are recorded, in case a step is only on one side of the rules
incomplete_steps.add(step)
incomplete_steps.add(prerequisite)
            if step not in requirements:
                requirements[step] = {prerequisite}
            else:
                requirements[step].add(prerequisite)
return requirements, incomplete_steps
##############
# Solution 1 #
##############
requirements, incomplete_steps = load_input()
completed_steps = ""
while len(incomplete_steps) > 0:
# Process any steps with no requirements
for step in sorted(incomplete_steps):
if len(requirements.get(step, set())) == 0:
completed_steps += step
incomplete_steps.remove(step)
# Remove as prerequisite from others
            requirements = {k: (v - {step}) for k, v in requirements.items()}
break
answer = completed_steps
print(f"Solution to part 1 is {answer}")
##############
# Solution 2 #
##############
# I should look into writing a solution using async/await.
# Reload data since the original data was altered
requirements, incomplete_steps = load_input()
completed_steps = ""
total_steps = len(incomplete_steps)
# Initialize worker tracking
seconds = 0
worker_tasks = [None, None, None, None, None] # Which task is being done by each worker
worker_times = [0, 0, 0, 0, 0] # Time remaining for current task for each worker
while len(completed_steps) < total_steps:
# Pass the minimum time needed to free at least one worker
nonzero_times = [n for n in worker_times if n != 0] # Must ignore 0 as the minimum to avoid freezing time
if len(nonzero_times) > 0: # If all are zero the list will be empty and min() would return an error
min_time_left = min(nonzero_times)
seconds += min_time_left
worker_times = [n-min_time_left if n > 0 else 0 for n in worker_times]
# Process any completed jobs after this time has passed
for idx in range(len(worker_tasks)):
# Process available workers
if worker_times[idx] == 0:
            if worker_tasks[idx] is not None:
# Mark the completed step (if there is one)
completed_step = worker_tasks[idx]
worker_tasks[idx] = None
completed_steps += completed_step
# Update requirements to remove the completed step
                requirements = {k: (v - {completed_step}) for k, v in requirements.items()}
# Get currently available tasks
available_steps = set([step for step in incomplete_steps if len(requirements.get(step, set())) == 0])
# Assign next alphabetical steps if possible
for idx in range(len(worker_tasks)):
if worker_tasks[idx] is None and len(available_steps) > 0:
next_job = sorted(available_steps)[0] # Get job
worker_tasks[idx] = next_job # Assign job
incomplete_steps.remove(next_job) # Remove job from incomplete steps
available_steps.remove(next_job) # Remove job from current list of available steps so subsequent workers don't use it this round
next_job_time = ord(next_job) - 64 + 60 # ord("A") = 65 so next_job_time = 65 - 64 + 60 = 61
worker_times[idx] = next_job_time # Add time remaining for this new step
answer = seconds
print(f"Solution to part 2 is {answer}")
| load_input |
design3.py | #! /usr/bin/env python3
row = int(input("Enter the number of rows: "))
n = row
while n >= 0:
x = "*" * n
y = " " * (row - n) | print(y + x)
n -= 1 | |
tests_js.py | # Copyright The IETF Trust 2021, All Rights Reserved
# -*- coding: utf-8 -*-
import datetime
import debug # pyflakes:ignore
from ietf.doc.factories import WgDraftFactory
from ietf.group.factories import GroupFactory, RoleFactory, DatedGroupMilestoneFactory
from ietf.utils.jstest import IetfSeleniumTestCase, ifSeleniumEnabled, selenium_enabled
if selenium_enabled():
|
@ifSeleniumEnabled
class MilestoneTests(IetfSeleniumTestCase):
def setUp(self):
super(MilestoneTests, self).setUp()
self.wait = WebDriverWait(self.driver, 2)
self.group = GroupFactory()
self.chair = RoleFactory(group=self.group, name_id='chair').person
def _search_draft_and_locate_result(self, draft_input, search_string, draft):
"""Search for a draft and get the search result element"""
draft_input.send_keys(search_string)
result_selector = 'ul.select2-results > li > div.select2-result-label'
self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, result_selector),
draft.name
))
results = self.driver.find_elements_by_css_selector(result_selector)
matching_results = [r for r in results if draft.name in r.text]
self.assertEqual(len(matching_results), 1)
return matching_results[0]
def _click_milestone_submit_button(self, label):
submit_button_selector = 'form#milestones-form button[type="submit"]'
submit_button = self.wait.until(
expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, submit_button_selector))
)
self.assertIn(label, submit_button.text)
self.scroll_to_element(submit_button)
submit_button.click()
def _assert_milestone_changed(self):
"""Wait for milestone to be marked as changed and assert that this succeeded"""
milestone_selector = 'form#milestones-form .milestone'
try:
found_expected_text = self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, milestone_selector),
'Changed'
)
)
except TimeoutException:
found_expected_text = False
self.assertTrue(found_expected_text, 'Milestone never marked as "changed"')
return self.driver.find_element_by_css_selector(milestone_selector)
def test_add_milestone(self):
draft = WgDraftFactory()
WgDraftFactory.create_batch(3) # some drafts to ignore
description = 'some description'
due_date = datetime.date.today() + datetime.timedelta(days=60)
assert(len(draft.name) > 5)
draft_search_string = draft.name[-5:]
self.login(self.chair.user.username)
url = self.absreverse('ietf.group.milestones.edit_milestones;current',
kwargs=dict(acronym=self.group.acronym))
self.driver.get(url)
add_milestone_button = self.wait.until(
expected_conditions.element_to_be_clickable(
(By.CSS_SELECTOR, 'button.add-milestone')
))
self.scroll_to_element(add_milestone_button)
add_milestone_button.click()
edit_div = self.wait.until(
expected_conditions.visibility_of_element_located(
(By.CSS_SELECTOR, 'form#milestones-form div.edit-milestone')
))
desc_input = edit_div.find_element_by_css_selector('input[id$="_desc"]')
due_input = edit_div.find_element_by_css_selector('input[id$="_due"]')
draft_input = edit_div.find_element_by_css_selector(
'div.select2-container[id$="id_docs"] input.select2-input'
)
# fill in the edit milestone form
desc_input.send_keys(description)
due_input.send_keys(due_date.strftime('%m %Y\n')) # \n closes the date selector
self._search_draft_and_locate_result(draft_input, draft_search_string, draft).click()
self._click_milestone_submit_button('Review')
result_row = self._assert_milestone_changed()
self.assertIn(description, result_row.text)
self._click_milestone_submit_button('Save')
# Wait for page to return to group page
self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, 'div#content h1'),
self.group.name
)
)
self.assertIn('1 new milestone', self.driver.page_source)
self.assertEqual(self.group.groupmilestone_set.count(), 1)
gms = self.group.groupmilestone_set.first()
self.assertEqual(gms.desc, description)
self.assertEqual(gms.due.strftime('%m %Y'), due_date.strftime('%m %Y'))
self.assertEqual(list(gms.docs.all()), [draft])
def test_edit_milestone(self):
milestone = DatedGroupMilestoneFactory(group=self.group)
draft = WgDraftFactory()
WgDraftFactory.create_batch(3) # some drafts to ignore
assert(len(draft.name) > 5)
draft_search_string = draft.name[-5:]
url = self.absreverse('ietf.group.milestones.edit_milestones;current',
kwargs=dict(acronym=self.group.acronym))
self.login(self.chair.user.username)
self.driver.get(url)
# should only be one milestone row - test will fail later if we somehow get the wrong one
edit_element = self.wait.until(
expected_conditions.element_to_be_clickable(
(By.CSS_SELECTOR, 'form#milestones-form div.milestonerow')
)
)
edit_element.click()
# find the description field corresponding to our milestone
desc_field = self.wait.until(
expected_conditions.visibility_of_element_located(
(By.CSS_SELECTOR, 'input[value="%s"]' % milestone.desc)
)
)
# Get the prefix used to identify inputs related to this milestone
prefix = desc_field.get_attribute('id')[:-4] # -4 to strip off 'desc', leave '-'
due_field = self.driver.find_element_by_id(prefix + 'due')
hidden_drafts_field = self.driver.find_element_by_id(prefix + 'docs')
draft_input = self.driver.find_element_by_css_selector(
'div.select2-container[id*="%s"] input.select2-input' % prefix
)
self.assertEqual(due_field.get_attribute('value'), milestone.due.strftime('%B %Y'))
self.assertEqual(hidden_drafts_field.get_attribute('value'),
','.join([str(doc.pk) for doc in milestone.docs.all()]))
# modify the fields
new_due_date = (milestone.due + datetime.timedelta(days=31)).strftime('%m %Y')
due_field.clear()
due_field.send_keys(new_due_date + '\n')
self._search_draft_and_locate_result(draft_input, draft_search_string, draft).click()
self._click_milestone_submit_button('Review')
self._assert_milestone_changed()
self._click_milestone_submit_button('Save')
# Wait for page to return to group page
self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, 'div#content h1'),
self.group.name
)
)
expected_desc = milestone.desc
expected_due_date = new_due_date
expected_docs = [draft]
self.assertEqual(self.group.groupmilestone_set.count(), 1)
gms = self.group.groupmilestone_set.first()
self.assertEqual(gms.desc, expected_desc)
self.assertEqual(gms.due.strftime('%m %Y'), expected_due_date)
self.assertCountEqual(expected_docs, gms.docs.all())
| from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions |
handlers.go | package server
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"
oidc "github.com/coreos/go-oidc"
"github.com/gorilla/mux"
jose "gopkg.in/square/go-jose.v2"
"github.com/dexidp/dex/connector"
"github.com/dexidp/dex/server/internal"
"github.com/dexidp/dex/storage"
)
// newHealthChecker returns the healthz handler. The handler runs until the
// provided context is canceled.
func (s *Server) newHealthChecker(ctx context.Context) http.Handler {
h := &healthChecker{s: s}
// Perform one health check synchronously so the returned handler returns
// valid data immediately.
h.runHealthCheck()
go func() {
for {
select {
case <-ctx.Done():
return
case <-time.After(time.Second * 15):
}
h.runHealthCheck()
}
}()
return h
}
// healthChecker periodically performs health checks on server dependencies.
// Currently, it only checks that the storage layer is available.
type healthChecker struct {
s *Server
// Result of the last health check: any error and the amount of time it took
// to query the storage.
mu sync.RWMutex
// Guarded by the mutex
err error
passed time.Duration
}
// runHealthCheck performs a single health check and makes the result available
// to any clients performing an HTTP request against the healthChecker.
func (h *healthChecker) runHealthCheck() {
t := h.s.now()
err := checkStorageHealth(h.s.storage, h.s.now)
passed := h.s.now().Sub(t)
if err != nil {
h.s.logger.Errorf("Storage health check failed: %v", err)
}
// Make sure to only hold the mutex to access the fields, and not while
// we're querying the storage object.
h.mu.Lock()
h.err = err
h.passed = passed
h.mu.Unlock()
}
func checkStorageHealth(s storage.Storage, now func() time.Time) error |
func (h *healthChecker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.mu.RLock()
err := h.err
t := h.passed
h.mu.RUnlock()
if err != nil {
h.s.renderError(r, w, http.StatusInternalServerError, "Health check failed.")
return
}
fmt.Fprintf(w, "Health check passed in %s", t)
}
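// handlePublicKeys serves the JSON Web Key Set (JWKS) used by clients to verify issued ID tokens.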
func (s *Server) handlePublicKeys(w http.ResponseWriter, r *http.Request) {
// TODO(ericchiang): Cache this.
keys, err := s.storage.GetKeys()
if err != nil {
s.logger.Errorf("failed to get keys: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Internal server error.")
return
}
if keys.SigningKeyPub == nil {
s.logger.Errorf("No public keys found.")
s.renderError(r, w, http.StatusInternalServerError, "Internal server error.")
return
}
jwks := jose.JSONWebKeySet{
Keys: make([]jose.JSONWebKey, len(keys.VerificationKeys)+1),
}
jwks.Keys[0] = *keys.SigningKeyPub
for i, verificationKey := range keys.VerificationKeys {
jwks.Keys[i+1] = *verificationKey.PublicKey
}
data, err := json.MarshalIndent(jwks, "", " ")
if err != nil {
s.logger.Errorf("failed to marshal discovery data: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Internal server error.")
return
}
maxAge := keys.NextRotation.Sub(s.now())
if maxAge < (time.Minute * 2) {
maxAge = time.Minute * 2
}
w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d, must-revalidate", int(maxAge.Seconds())))
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Length", strconv.Itoa(len(data)))
w.Write(data)
}
type discovery struct {
Issuer string `json:"issuer"`
Auth string `json:"authorization_endpoint"`
Token string `json:"token_endpoint"`
Keys string `json:"jwks_uri"`
UserInfo string `json:"userinfo_endpoint"`
DeviceEndpoint string `json:"device_authorization_endpoint"`
GrantTypes []string `json:"grant_types_supported"`
ResponseTypes []string `json:"response_types_supported"`
Subjects []string `json:"subject_types_supported"`
IDTokenAlgs []string `json:"id_token_signing_alg_values_supported"`
Scopes []string `json:"scopes_supported"`
AuthMethods []string `json:"token_endpoint_auth_methods_supported"`
Claims []string `json:"claims_supported"`
}
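// discoveryHandler marshals the discovery document once, when the handler is built, and serves the cached bytes.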
func (s *Server) discoveryHandler() (http.HandlerFunc, error) {
d := discovery{
Issuer: s.issuerURL.String(),
Auth: s.absURL("/auth"),
Token: s.absURL("/token"),
Keys: s.absURL("/keys"),
UserInfo: s.absURL("/userinfo"),
DeviceEndpoint: s.absURL("/device/code"),
Subjects: []string{"public"},
GrantTypes: []string{grantTypeAuthorizationCode, grantTypeRefreshToken, grantTypeDeviceCode},
IDTokenAlgs: []string{string(jose.RS256)},
Scopes: []string{"openid", "email", "groups", "profile", "offline_access"},
AuthMethods: []string{"client_secret_basic"},
Claims: []string{
"aud", "email", "email_verified", "exp",
"iat", "iss", "locale", "name", "sub",
},
}
for responseType := range s.supportedResponseTypes {
d.ResponseTypes = append(d.ResponseTypes, responseType)
}
sort.Strings(d.ResponseTypes)
data, err := json.MarshalIndent(d, "", " ")
if err != nil {
return nil, fmt.Errorf("failed to marshal discovery data: %v", err)
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Length", strconv.Itoa(len(data)))
w.Write(data)
}), nil
}
// handleAuthorization handles the OAuth2 auth endpoint.
func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) {
authReq, err := s.parseAuthorizationRequest(r)
if err != nil {
s.logger.Errorf("Failed to parse authorization request: %v", err)
status := http.StatusInternalServerError
// If this is an authErr, let's let it handle the error, or update the HTTP
// status code
if err, ok := err.(*authErr); ok {
if handler, ok := err.Handle(); ok {
// client_id and redirect_uri checked out and we can redirect back to
// the client with the error.
handler.ServeHTTP(w, r)
return
}
status = err.Status()
}
s.renderError(r, w, status, err.Error())
return
}
// TODO(ericchiang): Create this authorization request later in the login flow
// so users don't hit "not found" database errors if they wait at the login
// screen too long.
//
// See: https://github.com/dexidp/dex/issues/646
authReq.Expiry = s.now().Add(s.authRequestsValidFor)
if err := s.storage.CreateAuthRequest(*authReq); err != nil {
s.logger.Errorf("Failed to create authorization request: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Failed to connect to the database.")
return
}
connectors, err := s.storage.ListConnectors()
if err != nil {
s.logger.Errorf("Failed to get list of connectors: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Failed to retrieve connector list.")
return
}
// Redirect if a client chooses a specific connector_id
if authReq.ConnectorID != "" {
for _, c := range connectors {
if c.ID == authReq.ConnectorID {
http.Redirect(w, r, s.absPath("/auth", c.ID)+"?req="+authReq.ID, http.StatusFound)
return
}
}
s.tokenErrHelper(w, errInvalidConnectorID, "Connector ID does not match a valid Connector", http.StatusNotFound)
return
}
if len(connectors) == 1 && !s.alwaysShowLogin {
for _, c := range connectors {
			// TODO(ericchiang): Make this pass on r.URL.RawQuery and let something later
			// on create the auth request.
http.Redirect(w, r, s.absPath("/auth", c.ID)+"?req="+authReq.ID, http.StatusFound)
return
}
}
connectorInfos := make([]connectorInfo, len(connectors))
for index, conn := range connectors {
connectorInfos[index] = connectorInfo{
ID: conn.ID,
Name: conn.Name,
Type: conn.Type,
			// TODO(ericchiang): Make this pass on r.URL.RawQuery and let something later
			// on create the auth request.
URL: s.absPath("/auth", conn.ID) + "?req=" + authReq.ID,
}
}
if err := s.templates.login(r, w, connectorInfos, r.URL.Path); err != nil {
s.logger.Errorf("Server template error: %v", err)
}
}
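// handleConnectorLogin renders a connector's login flow on GET and processes password logins on POST.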
func (s *Server) handleConnectorLogin(w http.ResponseWriter, r *http.Request) {
connID := mux.Vars(r)["connector"]
conn, err := s.getConnector(connID)
if err != nil {
s.logger.Errorf("Failed to get connector: %v", err)
s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist")
return
}
authReqID := r.FormValue("req")
authReq, err := s.storage.GetAuthRequest(authReqID)
if err != nil {
s.logger.Errorf("Failed to get auth request: %v", err)
if err == storage.ErrNotFound {
s.renderError(r, w, http.StatusBadRequest, "Login session expired.")
} else {
s.renderError(r, w, http.StatusInternalServerError, "Database error.")
}
return
}
// Set the connector being used for the login.
if authReq.ConnectorID != connID {
updater := func(a storage.AuthRequest) (storage.AuthRequest, error) {
if a.ConnectorID != "" {
return a, fmt.Errorf("connector is already set for this auth request")
}
a.ConnectorID = connID
return a, nil
}
if err := s.storage.UpdateAuthRequest(authReqID, updater); err != nil {
s.logger.Errorf("Failed to set connector ID on auth request: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Database error.")
return
}
}
scopes := parseScopes(authReq.Scopes)
showBacklink := len(s.connectors) > 1
switch r.Method {
case http.MethodGet:
switch conn := conn.Connector.(type) {
case connector.CallbackConnector:
// Use the auth request ID as the "state" token.
//
// TODO(ericchiang): Is this appropriate or should we also be using a nonce?
callbackURL, err := conn.LoginURL(scopes, s.absURL("/callback"), authReqID)
if err != nil {
s.logger.Errorf("Connector %q returned error when creating callback: %v", connID, err)
s.renderError(r, w, http.StatusInternalServerError, "Login error.")
return
}
http.Redirect(w, r, callbackURL, http.StatusFound)
case connector.PasswordConnector:
if err := s.templates.password(r, w, r.URL.String(), "", usernamePrompt(conn), false, showBacklink, r.URL.Path); err != nil {
s.logger.Errorf("Server template error: %v", err)
}
case connector.SAMLConnector:
action, value, err := conn.POSTData(scopes, authReqID)
if err != nil {
s.logger.Errorf("Creating SAML data: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Connector Login Error")
return
}
// TODO(ericchiang): Don't inline this.
fmt.Fprintf(w, `<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>SAML login</title>
</head>
<body>
<form method="post" action="%s" >
<input type="hidden" name="SAMLRequest" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
</form>
<script>
document.forms[0].submit();
</script>
</body>
</html>`, action, value, authReqID)
default:
s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.")
}
case http.MethodPost:
passwordConnector, ok := conn.Connector.(connector.PasswordConnector)
if !ok {
s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.")
return
}
username := r.FormValue("login")
password := r.FormValue("password")
identity, ok, err := passwordConnector.Login(r.Context(), scopes, username, password)
if err != nil {
s.logger.Errorf("Failed to login user: %v", err)
s.renderError(r, w, http.StatusInternalServerError, fmt.Sprintf("Login error: %v", err))
return
}
if !ok {
if err := s.templates.password(r, w, r.URL.String(), username, usernamePrompt(passwordConnector), true, showBacklink, r.URL.Path); err != nil {
s.logger.Errorf("Server template error: %v", err)
}
return
}
redirectURL, err := s.finalizeLogin(identity, authReq, conn.Connector)
if err != nil {
s.logger.Errorf("Failed to finalize login: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Login error.")
return
}
http.Redirect(w, r, redirectURL, http.StatusSeeOther)
default:
s.renderError(r, w, http.StatusBadRequest, "Unsupported request method.")
}
}
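// handleConnectorCallback handles OAuth2 (GET) and SAML POST-binding (POST) callbacks, then finalizes the login.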
func (s *Server) handleConnectorCallback(w http.ResponseWriter, r *http.Request) {
var authID string
switch r.Method {
case http.MethodGet: // OAuth2 callback
if authID = r.URL.Query().Get("state"); authID == "" {
s.renderError(r, w, http.StatusBadRequest, "User session error.")
return
}
case http.MethodPost: // SAML POST binding
if authID = r.PostFormValue("RelayState"); authID == "" {
s.renderError(r, w, http.StatusBadRequest, "User session error.")
return
}
default:
s.renderError(r, w, http.StatusBadRequest, "Method not supported")
return
}
authReq, err := s.storage.GetAuthRequest(authID)
if err != nil {
if err == storage.ErrNotFound {
s.logger.Errorf("Invalid 'state' parameter provided: %v", err)
s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.")
return
}
s.logger.Errorf("Failed to get auth request: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Database error.")
return
}
if connID := mux.Vars(r)["connector"]; connID != "" && connID != authReq.ConnectorID {
s.logger.Errorf("Connector mismatch: authentication started with id %q, but callback for id %q was triggered", authReq.ConnectorID, connID)
s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.")
return
}
conn, err := s.getConnector(authReq.ConnectorID)
if err != nil {
s.logger.Errorf("Failed to get connector with id %q : %v", authReq.ConnectorID, err)
s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.")
return
}
var identity connector.Identity
switch conn := conn.Connector.(type) {
case connector.CallbackConnector:
if r.Method != http.MethodGet {
s.logger.Errorf("SAML request mapped to OAuth2 connector")
s.renderError(r, w, http.StatusBadRequest, "Invalid request")
return
}
identity, err = conn.HandleCallback(parseScopes(authReq.Scopes), r)
case connector.SAMLConnector:
if r.Method != http.MethodPost {
s.logger.Errorf("OAuth2 request mapped to SAML connector")
s.renderError(r, w, http.StatusBadRequest, "Invalid request")
return
}
identity, err = conn.HandlePOST(parseScopes(authReq.Scopes), r.PostFormValue("SAMLResponse"), authReq.ID)
default:
s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.")
return
}
if err != nil {
s.logger.Errorf("Failed to authenticate: %v", err)
s.renderError(r, w, http.StatusInternalServerError, fmt.Sprintf("Failed to authenticate: %v", err))
return
}
redirectURL, err := s.finalizeLogin(identity, authReq, conn.Connector)
if err != nil {
s.logger.Errorf("Failed to finalize login: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Login error.")
return
}
http.Redirect(w, r, redirectURL, http.StatusSeeOther)
}
// finalizeLogin associates the user's identity with the current AuthRequest, then returns
// the approval page's path.
func (s *Server) finalizeLogin(identity connector.Identity, authReq storage.AuthRequest, conn connector.Connector) (string, error) {
claims := storage.Claims{
UserID: identity.UserID,
Username: identity.Username,
PreferredUsername: identity.PreferredUsername,
Email: identity.Email,
EmailVerified: identity.EmailVerified,
Groups: identity.Groups,
}
updater := func(a storage.AuthRequest) (storage.AuthRequest, error) {
a.LoggedIn = true
a.Claims = claims
a.ConnectorData = identity.ConnectorData
return a, nil
}
if err := s.storage.UpdateAuthRequest(authReq.ID, updater); err != nil {
return "", fmt.Errorf("failed to update auth request: %v", err)
}
email := claims.Email
if !claims.EmailVerified {
email = email + " (unverified)"
}
s.logger.Infof("login successful: connector %q, username=%q, preferred_username=%q, email=%q, groups=%q",
authReq.ConnectorID, claims.Username, claims.PreferredUsername, email, claims.Groups)
returnURL := path.Join(s.issuerURL.Path, "/approval") + "?req=" + authReq.ID
_, ok := conn.(connector.RefreshConnector)
if !ok {
return returnURL, nil
}
// Try to retrieve an existing OfflineSession object for the corresponding user.
if session, err := s.storage.GetOfflineSessions(identity.UserID, authReq.ConnectorID); err != nil {
if err != storage.ErrNotFound {
s.logger.Errorf("failed to get offline session: %v", err)
return "", err
}
offlineSessions := storage.OfflineSessions{
UserID: identity.UserID,
ConnID: authReq.ConnectorID,
Refresh: make(map[string]*storage.RefreshTokenRef),
ConnectorData: identity.ConnectorData,
}
// Create a new OfflineSession object for the user and add a reference object for
// the newly received refreshtoken.
if err := s.storage.CreateOfflineSessions(offlineSessions); err != nil {
s.logger.Errorf("failed to create offline session: %v", err)
return "", err
}
} else {
// Update existing OfflineSession obj with new RefreshTokenRef.
if err := s.storage.UpdateOfflineSessions(session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) {
if len(identity.ConnectorData) > 0 {
old.ConnectorData = identity.ConnectorData
}
return old, nil
}); err != nil {
s.logger.Errorf("failed to update offline session: %v", err)
return "", err
}
}
return returnURL, nil
}
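// handleApproval shows the grant-approval page and, once approved (or when approval is skipped), sends the code response.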
func (s *Server) handleApproval(w http.ResponseWriter, r *http.Request) {
authReq, err := s.storage.GetAuthRequest(r.FormValue("req"))
if err != nil {
s.logger.Errorf("Failed to get auth request: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Database error.")
return
}
if !authReq.LoggedIn {
s.logger.Errorf("Auth request does not have an identity for approval")
s.renderError(r, w, http.StatusInternalServerError, "Login process not yet finalized.")
return
}
switch r.Method {
case http.MethodGet:
if s.skipApproval {
s.sendCodeResponse(w, r, authReq)
return
}
client, err := s.storage.GetClient(authReq.ClientID)
if err != nil {
s.logger.Errorf("Failed to get client %q: %v", authReq.ClientID, err)
s.renderError(r, w, http.StatusInternalServerError, "Failed to retrieve client.")
return
}
if err := s.templates.approval(r, w, authReq.ID, authReq.Claims.Username, client.Name, authReq.Scopes, r.URL.Path); err != nil {
s.logger.Errorf("Server template error: %v", err)
}
case http.MethodPost:
if r.FormValue("approval") != "approve" {
s.renderError(r, w, http.StatusInternalServerError, "Approval rejected.")
return
}
s.sendCodeResponse(w, r, authReq)
}
}
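// sendCodeResponse issues the final redirect for the code, implicit, and hybrid flows.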
func (s *Server) sendCodeResponse(w http.ResponseWriter, r *http.Request, authReq storage.AuthRequest) {
if s.now().After(authReq.Expiry) {
s.renderError(r, w, http.StatusBadRequest, "User session has expired.")
return
}
if err := s.storage.DeleteAuthRequest(authReq.ID); err != nil {
if err != storage.ErrNotFound {
s.logger.Errorf("Failed to delete authorization request: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Internal server error.")
} else {
s.renderError(r, w, http.StatusBadRequest, "User session error.")
}
return
}
u, err := url.Parse(authReq.RedirectURI)
if err != nil {
s.renderError(r, w, http.StatusInternalServerError, "Invalid redirect URI.")
return
}
var (
// Was the initial request using the implicit or hybrid flow instead of
// the "normal" code flow?
implicitOrHybrid = false
// Only present in hybrid or code flow. code.ID == "" if this is not set.
code storage.AuthCode
// ID token returned immediately if the response_type includes "id_token".
// Only valid for implicit and hybrid flows.
idToken string
idTokenExpiry time.Time
// Access token
accessToken string
)
for _, responseType := range authReq.ResponseTypes {
switch responseType {
case responseTypeCode:
code = storage.AuthCode{
ID: storage.NewID(),
ClientID: authReq.ClientID,
ConnectorID: authReq.ConnectorID,
Nonce: authReq.Nonce,
Scopes: authReq.Scopes,
Claims: authReq.Claims,
Expiry: s.now().Add(time.Minute * 30),
RedirectURI: authReq.RedirectURI,
ConnectorData: authReq.ConnectorData,
}
if err := s.storage.CreateAuthCode(code); err != nil {
s.logger.Errorf("Failed to create auth code: %v", err)
s.renderError(r, w, http.StatusInternalServerError, "Internal server error.")
return
}
// Implicit and hybrid flows that try to use the OOB redirect URI are
// rejected earlier. If we got here we're using the code flow.
if authReq.RedirectURI == redirectURIOOB {
if err := s.templates.oob(r, w, code.ID, r.URL.Path); err != nil {
s.logger.Errorf("Server template error: %v", err)
}
return
}
case responseTypeToken:
implicitOrHybrid = true
case responseTypeIDToken:
implicitOrHybrid = true
var err error
accessToken, err = s.newAccessToken(authReq.ClientID, authReq.Claims, authReq.Scopes, authReq.Nonce, authReq.ConnectorID)
if err != nil {
s.logger.Errorf("failed to create new access token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
idToken, idTokenExpiry, err = s.newIDToken(authReq.ClientID, authReq.Claims, authReq.Scopes, authReq.Nonce, accessToken, authReq.ConnectorID)
if err != nil {
s.logger.Errorf("failed to create ID token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
}
}
if implicitOrHybrid {
v := url.Values{}
v.Set("access_token", accessToken)
v.Set("token_type", "bearer")
v.Set("state", authReq.State)
if idToken != "" {
v.Set("id_token", idToken)
// The hybrid flow with only "code token" or "code id_token" doesn't return an
// "expires_in" value. If "code" wasn't provided, indicating the implicit flow,
// don't add it.
//
// https://openid.net/specs/openid-connect-core-1_0.html#HybridAuthResponse
if code.ID == "" {
v.Set("expires_in", strconv.Itoa(int(idTokenExpiry.Sub(s.now()).Seconds())))
}
}
if code.ID != "" {
v.Set("code", code.ID)
}
// Implicit and hybrid flows return their values as part of the fragment.
//
// HTTP/1.1 303 See Other
// Location: https://client.example.org/cb#
// access_token=SlAV32hkKG
// &token_type=bearer
// &id_token=eyJ0 ... NiJ9.eyJ1c ... I6IjIifX0.DeWt4Qu ... ZXso
// &expires_in=3600
// &state=af0ifjsldkj
//
u.Fragment = v.Encode()
} else {
// The code flow add values to the URL query.
//
// HTTP/1.1 303 See Other
// Location: https://client.example.org/cb?
// code=SplxlOBeZQQYbYS6WxSbIA
// &state=af0ifjsldkj
//
q := u.Query()
q.Set("code", code.ID)
q.Set("state", authReq.State)
u.RawQuery = q.Encode()
}
http.Redirect(w, r, u.String(), http.StatusSeeOther)
}
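// handleToken authenticates the client and dispatches the token request by grant_type.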
func (s *Server) handleToken(w http.ResponseWriter, r *http.Request) {
clientID, clientSecret, ok := r.BasicAuth()
if ok {
var err error
if clientID, err = url.QueryUnescape(clientID); err != nil {
s.tokenErrHelper(w, errInvalidRequest, "client_id improperly encoded", http.StatusBadRequest)
return
}
if clientSecret, err = url.QueryUnescape(clientSecret); err != nil {
s.tokenErrHelper(w, errInvalidRequest, "client_secret improperly encoded", http.StatusBadRequest)
return
}
} else {
clientID = r.PostFormValue("client_id")
clientSecret = r.PostFormValue("client_secret")
}
client, err := s.storage.GetClient(clientID)
if err != nil {
if err != storage.ErrNotFound {
s.logger.Errorf("failed to get client: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
} else {
s.tokenErrHelper(w, errInvalidClient, "Invalid client credentials.", http.StatusUnauthorized)
}
return
}
if client.Secret != clientSecret {
s.tokenErrHelper(w, errInvalidClient, "Invalid client credentials.", http.StatusUnauthorized)
return
}
grantType := r.PostFormValue("grant_type")
switch grantType {
case grantTypeAuthorizationCode:
s.handleAuthCode(w, r, client)
case grantTypeRefreshToken:
s.handleRefreshToken(w, r, client)
case grantTypePassword:
s.handlePasswordGrant(w, r, client)
default:
s.tokenErrHelper(w, errInvalidGrant, "", http.StatusBadRequest)
}
}
// handle an access token request https://tools.ietf.org/html/rfc6749#section-4.1.3
func (s *Server) handleAuthCode(w http.ResponseWriter, r *http.Request, client storage.Client) {
code := r.PostFormValue("code")
redirectURI := r.PostFormValue("redirect_uri")
authCode, err := s.storage.GetAuthCode(code)
if err != nil || s.now().After(authCode.Expiry) || authCode.ClientID != client.ID {
if err != storage.ErrNotFound {
s.logger.Errorf("failed to get auth code: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
} else {
s.tokenErrHelper(w, errInvalidRequest, "Invalid or expired code parameter.", http.StatusBadRequest)
}
return
}
if authCode.RedirectURI != redirectURI {
s.tokenErrHelper(w, errInvalidRequest, "redirect_uri did not match URI from initial request.", http.StatusBadRequest)
return
}
tokenResponse, err := s.exchangeAuthCode(w, authCode, client)
if err != nil {
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
s.writeAccessToken(w, tokenResponse)
}
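// exchangeAuthCode redeems a one-time auth code for ID/access tokens and, when permitted, a refresh token.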
func (s *Server) exchangeAuthCode(w http.ResponseWriter, authCode storage.AuthCode, client storage.Client) (*accessTokenReponse, error) {
accessToken, err := s.newAccessToken(client.ID, authCode.Claims, authCode.Scopes, authCode.Nonce, authCode.ConnectorID)
if err != nil {
s.logger.Errorf("failed to create new access token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return nil, err
}
idToken, expiry, err := s.newIDToken(client.ID, authCode.Claims, authCode.Scopes, authCode.Nonce, accessToken, authCode.ConnectorID)
if err != nil {
s.logger.Errorf("failed to create ID token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return nil, err
}
if err := s.storage.DeleteAuthCode(authCode.ID); err != nil {
s.logger.Errorf("failed to delete auth code: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return nil, err
}
reqRefresh := func() bool {
// Ensure the connector supports refresh tokens.
//
// Connectors like `saml` do not implement RefreshConnector.
conn, err := s.getConnector(authCode.ConnectorID)
if err != nil {
s.logger.Errorf("connector with ID %q not found: %v", authCode.ConnectorID, err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return false
}
_, ok := conn.Connector.(connector.RefreshConnector)
if !ok {
return false
}
for _, scope := range authCode.Scopes {
if scope == scopeOfflineAccess {
return true
}
}
return false
}()
var refreshToken string
if reqRefresh {
refresh := storage.RefreshToken{
ID: storage.NewID(),
Token: storage.NewID(),
ClientID: authCode.ClientID,
ConnectorID: authCode.ConnectorID,
Scopes: authCode.Scopes,
Claims: authCode.Claims,
Nonce: authCode.Nonce,
ConnectorData: authCode.ConnectorData,
CreatedAt: s.now(),
LastUsed: s.now(),
}
token := &internal.RefreshToken{
RefreshId: refresh.ID,
Token: refresh.Token,
}
if refreshToken, err = internal.Marshal(token); err != nil {
s.logger.Errorf("failed to marshal refresh token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return nil, err
}
if err := s.storage.CreateRefresh(refresh); err != nil {
s.logger.Errorf("failed to create refresh token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return nil, err
}
// deleteToken determines if we need to delete the newly created refresh token
// due to a failure in updating/creating the OfflineSession object for the
// corresponding user.
var deleteToken bool
defer func() {
if deleteToken {
// Delete newly created refresh token from storage.
if err := s.storage.DeleteRefresh(refresh.ID); err != nil {
s.logger.Errorf("failed to delete refresh token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
}
}()
tokenRef := storage.RefreshTokenRef{
ID: refresh.ID,
ClientID: refresh.ClientID,
CreatedAt: refresh.CreatedAt,
LastUsed: refresh.LastUsed,
}
// Try to retrieve an existing OfflineSession object for the corresponding user.
if session, err := s.storage.GetOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID); err != nil {
if err != storage.ErrNotFound {
s.logger.Errorf("failed to get offline session: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
deleteToken = true
return nil, err
}
offlineSessions := storage.OfflineSessions{
UserID: refresh.Claims.UserID,
ConnID: refresh.ConnectorID,
Refresh: make(map[string]*storage.RefreshTokenRef),
}
offlineSessions.Refresh[tokenRef.ClientID] = &tokenRef
// Create a new OfflineSession object for the user and add a reference object for
// the newly received refreshtoken.
if err := s.storage.CreateOfflineSessions(offlineSessions); err != nil {
s.logger.Errorf("failed to create offline session: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
deleteToken = true
return nil, err
}
} else {
if oldTokenRef, ok := session.Refresh[tokenRef.ClientID]; ok {
// Delete old refresh token from storage.
if err := s.storage.DeleteRefresh(oldTokenRef.ID); err != nil && err != storage.ErrNotFound {
s.logger.Errorf("failed to delete refresh token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
deleteToken = true
return nil, err
}
}
// Update existing OfflineSession obj with new RefreshTokenRef.
if err := s.storage.UpdateOfflineSessions(session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) {
old.Refresh[tokenRef.ClientID] = &tokenRef
return old, nil
}); err != nil {
s.logger.Errorf("failed to update offline session: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
deleteToken = true
return nil, err
}
}
}
return s.toAccessTokenResponse(idToken, accessToken, refreshToken, expiry), nil
}
// handle a refresh token request https://tools.ietf.org/html/rfc6749#section-6
func (s *Server) handleRefreshToken(w http.ResponseWriter, r *http.Request, client storage.Client) {
code := r.PostFormValue("refresh_token")
scope := r.PostFormValue("scope")
if code == "" {
s.tokenErrHelper(w, errInvalidRequest, "No refresh token in request.", http.StatusBadRequest)
return
}
token := new(internal.RefreshToken)
if err := internal.Unmarshal(code, token); err != nil {
// For backward compatibility, assume the refresh_token is a raw refresh token ID
// if it fails to decode.
//
// Because refresh_token values that aren't unmarshable were generated by servers
// that don't have a Token value, we'll still reject any attempts to claim a
// refresh_token twice.
token = &internal.RefreshToken{RefreshId: code, Token: ""}
}
refresh, err := s.storage.GetRefresh(token.RefreshId)
if err != nil {
s.logger.Errorf("failed to get refresh token: %v", err)
if err == storage.ErrNotFound {
s.tokenErrHelper(w, errInvalidRequest, "Refresh token is invalid or has already been claimed by another client.", http.StatusBadRequest)
} else {
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
}
return
}
if refresh.ClientID != client.ID {
s.logger.Errorf("client %s trying to claim token for client %s", client.ID, refresh.ClientID)
s.tokenErrHelper(w, errInvalidRequest, "Refresh token is invalid or has already been claimed by another client.", http.StatusBadRequest)
return
}
if refresh.Token != token.Token {
s.logger.Errorf("refresh token with id %s claimed twice", refresh.ID)
s.tokenErrHelper(w, errInvalidRequest, "Refresh token is invalid or has already been claimed by another client.", http.StatusBadRequest)
return
}
// Per the OAuth2 spec, if the client has omitted the scopes, default to the original
// authorized scopes.
//
// https://tools.ietf.org/html/rfc6749#section-6
scopes := refresh.Scopes
if scope != "" {
requestedScopes := strings.Fields(scope)
var unauthorizedScopes []string
for _, s := range requestedScopes {
contains := func() bool {
for _, scope := range refresh.Scopes {
if s == scope {
return true
}
}
return false
}()
if !contains {
unauthorizedScopes = append(unauthorizedScopes, s)
}
}
if len(unauthorizedScopes) > 0 {
msg := fmt.Sprintf("Requested scopes contain unauthorized scope(s): %q.", unauthorizedScopes)
s.tokenErrHelper(w, errInvalidRequest, msg, http.StatusBadRequest)
return
}
scopes = requestedScopes
}
var connectorData []byte
if session, err := s.storage.GetOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID); err != nil {
if err != storage.ErrNotFound {
s.logger.Errorf("failed to get offline session: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
} else if len(refresh.ConnectorData) > 0 {
// Use the old connector data if it exists; it should be deleted once used.
connectorData = refresh.ConnectorData
} else {
connectorData = session.ConnectorData
}
conn, err := s.getConnector(refresh.ConnectorID)
if err != nil {
s.logger.Errorf("connector with ID %q not found: %v", refresh.ConnectorID, err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
ident := connector.Identity{
UserID: refresh.Claims.UserID,
Username: refresh.Claims.Username,
PreferredUsername: refresh.Claims.PreferredUsername,
Email: refresh.Claims.Email,
EmailVerified: refresh.Claims.EmailVerified,
Groups: refresh.Claims.Groups,
ConnectorData: connectorData,
}
// Can the connector refresh the identity? If so, attempt to refresh the data
// in the connector.
//
// TODO(ericchiang): We may want a strict mode where connectors that don't implement
// this interface can't perform refreshing.
if refreshConn, ok := conn.Connector.(connector.RefreshConnector); ok {
newIdent, err := refreshConn.Refresh(r.Context(), parseScopes(scopes), ident)
if err != nil {
s.logger.Errorf("failed to refresh identity: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
ident = newIdent
}
claims := storage.Claims{
UserID: ident.UserID,
Username: ident.Username,
PreferredUsername: ident.PreferredUsername,
Email: ident.Email,
EmailVerified: ident.EmailVerified,
Groups: ident.Groups,
}
accessToken, err := s.newAccessToken(client.ID, claims, scopes, refresh.Nonce, refresh.ConnectorID)
if err != nil {
s.logger.Errorf("failed to create new access token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
idToken, expiry, err := s.newIDToken(client.ID, claims, scopes, refresh.Nonce, accessToken, refresh.ConnectorID)
if err != nil {
s.logger.Errorf("failed to create ID token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
newToken := &internal.RefreshToken{
RefreshId: refresh.ID,
Token: storage.NewID(),
}
rawNewToken, err := internal.Marshal(newToken)
if err != nil {
s.logger.Errorf("failed to marshal refresh token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
lastUsed := s.now()
updater := func(old storage.RefreshToken) (storage.RefreshToken, error) {
if old.Token != refresh.Token {
return old, errors.New("refresh token claimed twice")
}
old.Token = newToken.Token
// Update the claims of the refresh token.
//
// UserID intentionally ignored for now.
old.Claims.Username = ident.Username
old.Claims.PreferredUsername = ident.PreferredUsername
old.Claims.Email = ident.Email
old.Claims.EmailVerified = ident.EmailVerified
old.Claims.Groups = ident.Groups
old.LastUsed = lastUsed
// ConnectorData has been moved to OfflineSession
old.ConnectorData = []byte{}
return old, nil
}
// Update LastUsed time stamp in refresh token reference object
// in offline session for the user.
if err := s.storage.UpdateOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) {
if old.Refresh[refresh.ClientID].ID != refresh.ID {
return old, errors.New("refresh token invalid")
}
old.Refresh[refresh.ClientID].LastUsed = lastUsed
old.ConnectorData = ident.ConnectorData
return old, nil
}); err != nil {
s.logger.Errorf("failed to update offline session: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
// Update refresh token in the storage.
if err := s.storage.UpdateRefreshToken(refresh.ID, updater); err != nil {
s.logger.Errorf("failed to update refresh token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
resp := s.toAccessTokenResponse(idToken, accessToken, rawNewToken, expiry)
s.writeAccessToken(w, resp)
}
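// A hedged client-side sketch of the rotation contract implemented above:
// each successful refresh invalidates the presented token and returns a
// re-marshaled replacement. The endpoint path and client credentials below
// are illustrative, not taken from this file.
//
//	resp, err := http.PostForm("https://dex.example.com/token", url.Values{
//		"grant_type":    {"refresh_token"},
//		"refresh_token": {oldRefreshToken},
//		"client_id":     {"example-app"},
//		"client_secret": {"example-secret"},
//	})
//
// The JSON response carries a new refresh_token that must replace the old
// one; replaying the old value is rejected as claimed twice.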
func (s *Server) handleUserInfo(w http.ResponseWriter, r *http.Request) {
const prefix = "Bearer "
auth := r.Header.Get("authorization")
if len(auth) < len(prefix) || !strings.EqualFold(prefix, auth[:len(prefix)]) {
w.Header().Set("WWW-Authenticate", "Bearer")
s.tokenErrHelper(w, errAccessDenied, "Invalid bearer token.", http.StatusUnauthorized)
return
}
rawIDToken := auth[len(prefix):]
verifier := oidc.NewVerifier(s.issuerURL.String(), &storageKeySet{s.storage}, &oidc.Config{SkipClientIDCheck: true})
idToken, err := verifier.Verify(r.Context(), rawIDToken)
if err != nil {
s.tokenErrHelper(w, errAccessDenied, err.Error(), http.StatusForbidden)
return
}
var claims json.RawMessage
if err := idToken.Claims(&claims); err != nil {
s.tokenErrHelper(w, errServerError, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(claims)
}
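// Hedged usage sketch for the userinfo handler above (the URL is
// illustrative; rawIDToken is the ID token string issued by this server):
//
//	req, _ := http.NewRequest("GET", "https://dex.example.com/userinfo", nil)
//	req.Header.Set("Authorization", "Bearer "+rawIDToken)
//	resp, _ := http.DefaultClient.Do(req)
//	// On success the body is the raw JSON claims of the verified ID token.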
func (s *Server) handlePasswordGrant(w http.ResponseWriter, r *http.Request, client storage.Client) {
// Parse the fields
if err := r.ParseForm(); err != nil {
s.tokenErrHelper(w, errInvalidRequest, "Couldn't parse data", http.StatusBadRequest)
return
}
q := r.Form
nonce := q.Get("nonce")
// Some clients, like the old go-oidc, provide extra whitespace. Tolerate this.
scopes := strings.Fields(q.Get("scope"))
// Parse the scopes if they are passed
var (
unrecognized []string
invalidScopes []string
)
hasOpenIDScope := false
for _, scope := range scopes {
switch scope {
case scopeOpenID:
hasOpenIDScope = true
case scopeOfflineAccess, scopeEmail, scopeProfile, scopeGroups, scopeFederatedID:
default:
peerID, ok := parseCrossClientScope(scope)
if !ok {
unrecognized = append(unrecognized, scope)
continue
}
isTrusted, err := s.validateCrossClientTrust(client.ID, peerID)
if err != nil {
s.tokenErrHelper(w, errInvalidClient, fmt.Sprintf("Error validating cross client trust %v.", err), http.StatusBadRequest)
return
}
if !isTrusted {
invalidScopes = append(invalidScopes, scope)
}
}
}
if !hasOpenIDScope {
s.tokenErrHelper(w, errInvalidRequest, `Missing required scope(s) ["openid"].`, http.StatusBadRequest)
return
}
if len(unrecognized) > 0 {
s.tokenErrHelper(w, errInvalidRequest, fmt.Sprintf("Unrecognized scope(s) %q", unrecognized), http.StatusBadRequest)
return
}
if len(invalidScopes) > 0 {
s.tokenErrHelper(w, errInvalidRequest, fmt.Sprintf("Client can't request scope(s) %q", invalidScopes), http.StatusBadRequest)
return
}
// Which connector
connID := s.passwordConnector
conn, err := s.getConnector(connID)
if err != nil {
s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not exist.", http.StatusBadRequest)
return
}
passwordConnector, ok := conn.Connector.(connector.PasswordConnector)
if !ok {
s.tokenErrHelper(w, errInvalidRequest, "Requested password connector does not correct type.", http.StatusBadRequest)
return
}
// Login
username := q.Get("username")
password := q.Get("password")
identity, ok, err := passwordConnector.Login(r.Context(), parseScopes(scopes), username, password)
if err != nil {
s.tokenErrHelper(w, errInvalidRequest, "Could not login user", http.StatusBadRequest)
return
}
if !ok {
s.tokenErrHelper(w, errAccessDenied, "Invalid username or password", http.StatusUnauthorized)
return
}
// Build the claims to send the id token
claims := storage.Claims{
UserID: identity.UserID,
Username: identity.Username,
PreferredUsername: identity.PreferredUsername,
Email: identity.Email,
EmailVerified: identity.EmailVerified,
Groups: identity.Groups,
}
accessToken := storage.NewID()
idToken, expiry, err := s.newIDToken(client.ID, claims, scopes, nonce, accessToken, connID)
if err != nil {
s.tokenErrHelper(w, errServerError, fmt.Sprintf("failed to create ID token: %v", err), http.StatusInternalServerError)
return
}
reqRefresh := func() bool {
// Ensure the connector supports refresh tokens.
//
// Connectors like `saml` do not implement RefreshConnector.
_, ok := conn.Connector.(connector.RefreshConnector)
if !ok {
return false
}
for _, scope := range scopes {
if scope == scopeOfflineAccess {
return true
}
}
return false
}()
var refreshToken string
if reqRefresh {
refresh := storage.RefreshToken{
ID: storage.NewID(),
Token: storage.NewID(),
ClientID: client.ID,
ConnectorID: connID,
Scopes: scopes,
Claims: claims,
Nonce: nonce,
// ConnectorData: authCode.ConnectorData,
CreatedAt: s.now(),
LastUsed: s.now(),
}
token := &internal.RefreshToken{
RefreshId: refresh.ID,
Token: refresh.Token,
}
if refreshToken, err = internal.Marshal(token); err != nil {
s.logger.Errorf("failed to marshal refresh token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
if err := s.storage.CreateRefresh(refresh); err != nil {
s.logger.Errorf("failed to create refresh token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
// deleteToken determines if we need to delete the newly created refresh token
// due to a failure in updating/creating the OfflineSession object for the
// corresponding user.
var deleteToken bool
defer func() {
if deleteToken {
// Delete newly created refresh token from storage.
if err := s.storage.DeleteRefresh(refresh.ID); err != nil {
s.logger.Errorf("failed to delete refresh token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
}
}()
tokenRef := storage.RefreshTokenRef{
ID: refresh.ID,
ClientID: refresh.ClientID,
CreatedAt: refresh.CreatedAt,
LastUsed: refresh.LastUsed,
}
// Try to retrieve an existing OfflineSession object for the corresponding user.
if session, err := s.storage.GetOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID); err != nil {
if err != storage.ErrNotFound {
s.logger.Errorf("failed to get offline session: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
deleteToken = true
return
}
offlineSessions := storage.OfflineSessions{
UserID: refresh.Claims.UserID,
ConnID: refresh.ConnectorID,
Refresh: make(map[string]*storage.RefreshTokenRef),
}
offlineSessions.Refresh[tokenRef.ClientID] = &tokenRef
// Create a new OfflineSession object for the user and add a reference object for
// the newly received refresh token.
if err := s.storage.CreateOfflineSessions(offlineSessions); err != nil {
s.logger.Errorf("failed to create offline session: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
deleteToken = true
return
}
} else {
if oldTokenRef, ok := session.Refresh[tokenRef.ClientID]; ok {
// Delete old refresh token from storage.
if err := s.storage.DeleteRefresh(oldTokenRef.ID); err != nil {
if err == storage.ErrNotFound {
s.logger.Warnf("database inconsistent, refresh token missing: %v", oldTokenRef.ID)
} else {
s.logger.Errorf("failed to delete refresh token: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
deleteToken = true
return
}
}
}
// Update existing OfflineSession obj with new RefreshTokenRef.
if err := s.storage.UpdateOfflineSessions(session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) {
old.Refresh[tokenRef.ClientID] = &tokenRef
return old, nil
}); err != nil {
s.logger.Errorf("failed to update offline session: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
deleteToken = true
return
}
}
}
resp := s.toAccessTokenResponse(idToken, accessToken, refreshToken, expiry)
s.writeAccessToken(w, resp)
}
type accessTokenReponse struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
ExpiresIn int `json:"expires_in"`
RefreshToken string `json:"refresh_token,omitempty"`
IDToken string `json:"id_token"`
}
func (s *Server) toAccessTokenResponse(idToken, accessToken, refreshToken string, expiry time.Time) *accessTokenReponse {
return &accessTokenReponse{
accessToken,
"bearer",
int(expiry.Sub(s.now()).Seconds()),
refreshToken,
idToken,
}
}
func (s *Server) writeAccessToken(w http.ResponseWriter, resp *accessTokenReponse) {
data, err := json.Marshal(resp)
if err != nil {
s.logger.Errorf("failed to marshal access token response: %v", err)
s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Length", strconv.Itoa(len(data)))
w.Write(data)
}
func (s *Server) renderError(r *http.Request, w http.ResponseWriter, status int, description string) {
if err := s.templates.err(r, w, status, description); err != nil {
s.logger.Errorf("Server template error: %v", err)
}
}
func (s *Server) tokenErrHelper(w http.ResponseWriter, typ string, description string, statusCode int) {
if err := tokenErr(w, typ, description, statusCode); err != nil {
s.logger.Errorf("token error response: %v", err)
}
}
// Check for username prompt override from connector. Defaults to "Username".
func usernamePrompt(conn connector.PasswordConnector) string {
if attr := conn.Prompt(); attr != "" {
return attr
}
return "Username"
}
| {
a := storage.AuthRequest{
ID: storage.NewID(),
ClientID: storage.NewID(),
// Set a short expiry so if the delete fails this will be cleaned up quickly by garbage collection.
Expiry: now().Add(time.Minute),
}
if err := s.CreateAuthRequest(a); err != nil {
return fmt.Errorf("create auth request: %v", err)
}
if err := s.DeleteAuthRequest(a.ID); err != nil {
return fmt.Errorf("delete auth request: %v", err)
}
return nil
} |
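Both token paths above repeat the same get-or-create bookkeeping for OfflineSessions. A condensed, hedged sketch of that pattern, assuming the storage interface and types used in the file (the helper name is hypothetical):

func upsertOfflineSession(st storage.Storage, refresh storage.RefreshToken, ref storage.RefreshTokenRef) error {
	_, err := st.GetOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID)
	if err == storage.ErrNotFound {
		// First refresh token for this user/connector pair: create the session.
		return st.CreateOfflineSessions(storage.OfflineSessions{
			UserID:  refresh.Claims.UserID,
			ConnID:  refresh.ConnectorID,
			Refresh: map[string]*storage.RefreshTokenRef{ref.ClientID: &ref},
		})
	}
	if err != nil {
		return err
	}
	// Session exists: swap in the new token reference for this client.
	return st.UpdateOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID,
		func(old storage.OfflineSessions) (storage.OfflineSessions, error) {
			old.Refresh[ref.ClientID] = &ref
			return old, nil
		})
}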
amdOneFile-wrapper.js | (function(){
var modules = {}, cache = {}
if (typeof define == 'undefined'){
window.define = function(id, factory){
modules[id] = factory
}
window.require = function(id){
var module = cache[id]
if (!module){
module = cache[id] = {}
var exports = module.exports = {}
modules[id].call(exports, require, exports, module)
}
return module.exports
} | }
})() |
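A usage sketch for the minimal loader above; the module ids are made up. Each factory runs once on first require, with this bound to exports, and its module.exports is cached afterwards:

define('math', function(require, exports, module){
  exports.add = function(a, b){ return a + b }
})
define('app', function(require, exports, module){
  var math = require('math')
  module.exports = function(){ return math.add(2, 3) }
})
console.log(require('app')()) // 5; 'math' is instantiated once and cached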
|
test_service.py | """Tests for pywemo.ouimeaux_device.api.service."""
import unittest.mock as mock
from xml.etree import ElementTree
from xml.etree import cElementTree as cet
import pytest
import requests
import pywemo.ouimeaux_device.api.service as svc
HEADERS_KWARG_KEY = "headers"
CONTENT_TYPE_KEY = "Content-Type"
SOAPACTION_KEY = "SOAPACTION"
MOCK_ARGS_ORDERED = 0
MOCK_ARGS_KWARGS = 1
svc.LOG = mock.Mock()
MOCK_RESPONSE = (
b'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
b' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
b'<s:Body>\n<u:GetInsightParamsResponse xmlns:u="urn:Belkin:service:metainfo:1">'
b"\r\n<InsightParams>0|1604849509|85|1315|27628|1209600|772|0|21689183|386799026.000000|8000"
b"</InsightParams>\r\n</u:GetInsightParamsResponse>\r\n</s:Body> </s:Envelope>"
)
class TestAction:
@staticmethod
def get_mock_action(name="", service_type="", url=""):
device = mock.Mock()
service = mock.Mock()
service.serviceType = service_type
service.controlURL = url
action_config = mock.MagicMock()
action_config.get_name = lambda: name
return svc.Action(device, service, action_config)
@staticmethod
def get_et_mock():
resp = cet.fromstring(MOCK_RESPONSE)
return mock.MagicMock(return_value=resp)
def test_call_post_request_is_made_exactly_once_when_successful(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock()
cet.fromstring = self.get_et_mock()
action()
assert post_mock.call_count == 1
def test_call_request_has_well_formed_xml_body(self):
action = self.get_mock_action(name="cool_name", service_type="service")
requests.post = post_mock = mock.Mock()
cet.fromstring = self.get_et_mock()
action()
body = post_mock.call_args[MOCK_ARGS_ORDERED][1]
ElementTree.fromstring(body) # will raise error if xml is malformed
def test_call_request_has_correct_header_keys(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock()
action()
headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
for header in [CONTENT_TYPE_KEY, SOAPACTION_KEY]:
assert header in headers
def | (self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock()
action()
headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
content_type_header = headers[CONTENT_TYPE_KEY]
assert content_type_header == "text/xml"
def test_call_headers_has_correct_soapaction(self):
service_type = "some_service"
name = "cool_name"
action = self.get_mock_action(name, service_type)
requests.post = post_mock = mock.Mock()
action()
headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
soapaction_header = headers[SOAPACTION_KEY]
assert soapaction_header == '"%s#%s"' % (service_type, name)
def test_call_headers_has_correct_url(self):
url = "http://www.github.com/"
action = self.get_mock_action(url=url)
requests.post = post_mock = mock.Mock()
action()
actual_url = post_mock.call_args[MOCK_ARGS_ORDERED][0]
assert actual_url == url
def test_call_request_is_tried_up_to_max_on_communication_error(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock(
side_effect=requests.exceptions.RequestException
)
try:
action()
except svc.ActionException:
pass
assert post_mock.call_count == svc.MAX_RETRIES
def test_call_throws_when_final_retry_fails(self):
action = self.get_mock_action()
requests.post = mock.Mock(
side_effect=requests.exceptions.RequestException
)
with pytest.raises(svc.ActionException):
action()
def test_call_returns_correct_dictionary_with_response_contents(self):
action = self.get_mock_action()
requests.post = mock.Mock()
envelope = cet.Element("soapEnvelope")
body = cet.SubElement(envelope, "soapBody")
response = cet.SubElement(body, "soapResponse")
response_content = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
for key, value in response_content.items():
element = cet.SubElement(response, key)
element.text = value
cet.fromstring = mock.MagicMock(return_value=envelope)
actual_responses = action()
assert actual_responses == response_content
| test_call_headers_has_correct_content_type |
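The tests above rebind requests.post and cet.fromstring at module scope, which leaks the mocks into later tests. A hedged variant of the first test using mock.patch.object so the patches are undone automatically (the test name is illustrative):

def test_call_post_once_with_scoped_patches():
    action = TestAction.get_mock_action()
    parsed = cet.fromstring(MOCK_RESPONSE)
    with mock.patch.object(requests, "post") as post_mock, \
            mock.patch.object(cet, "fromstring", return_value=parsed):
        action()
    assert post_mock.call_count == 1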
clnet.py | '''
CrossLink Network
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
return x * x.sigmoid()
def mish(x):
return x * torch.tanh(F.softplus(x))
class CrossLinkBlock(nn.Module):
'''Cross-Link Block'''
def __init__(self, in_channels, out_channels, kernel_size, pool_enable):
super(CrossLinkBlock, self).__init__()
self.pool_enable = pool_enable
self.ReLU = nn.ReLU()
# basic blocks
self.dconv1_1 = nn.Conv2d(in_channels,
in_channels,
kernel_size=kernel_size[0],
stride=1,
padding='same',
groups=1,
bias=False)
self.dconv1_2 = nn.Conv2d(in_channels,
in_channels,
kernel_size=kernel_size[1],
stride=1,
padding='same',
groups=1,
bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
self.bn2 = nn.BatchNorm2d(in_channels)
self.pconv = nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding='same',
groups=1,
bias=False)
self.bn3 = nn.BatchNorm2d(out_channels)
self.maxpool = nn.MaxPool2d(2, 2)
def forward(self, x):
|
class CLNET(nn.Module):
def __init__(self, cfg, num_classes=10):
super(CLNET, self).__init__()
self.cfg = cfg
self.conv1 = nn.Conv2d(3,
32,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32,
32,
kernel_size=3,
stride=1,
padding=1,
groups=1,
bias=False)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32,
16,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.layers = self._make_layers(in_channels=16)
self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)
def _make_layers(self, in_channels):
layers = []
cfg = [self.cfg[k] for k in ['out_channels', 'kernel_size', 'pool_enable']]
for out_channels, kernel_size, pool_enable in zip(*cfg):
layers.append(
CrossLinkBlock(in_channels,
out_channels,
kernel_size,
pool_enable))
in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
out = mish(self.bn1(self.pool1(self.conv1(x)))) # conv block
out = self.conv3(swish(self.bn2(self.conv2(out)))) # sep block
out = self.layers(out)
out = F.adaptive_avg_pool2d(out, 1)
out = out.view(out.size(0), -1)
dropout_rate = self.cfg['dropout_rate']
if self.training and dropout_rate > 0:
out = F.dropout(out, p=dropout_rate)
out = self.linear(out)
return out
def CLNet_V0(num_classes):
cfg = {
'out_channels': [24, 40, 80, 112, 160],
'kernel_size': [(5, 3), (3, 5), (3, 3), (5, 5), (3, 3)],
'pool_enable': [True, True, True, True, False],
'dropout_rate': 0.2
}
return CLNET(cfg, num_classes=num_classes)
import torchinfo
def test():
net = CLNet_V0(10)
torchinfo.summary(net, (1, 3, 32, 32))
device = 'cuda' if torch.cuda.is_available() else 'cpu'
net = net.to(device)
x = torch.randn(3, 3, 32, 32, device=device)
y = net(x)
print(y.shape)
if __name__ == '__main__':
test()
| '''Cross-link forward pass.'''
out1 = self.dconv1_1(x)
out2 = self.dconv1_2(x)
# Self-gate the first branch, then cross-gate: the gated out1 is
# re-weighted by the activation of the second branch (the "cross link").
out1 = torch.mul(out1, self.ReLU(out1))
out2 = torch.mul(out1, self.ReLU(out2))
# Fuse both branches, project to out_channels, optionally downsample.
out = self.bn1(out1) + self.bn2(out2)
out = self.bn3(self.pconv(out))
if self.pool_enable:
out = self.maxpool(out)
return out
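A quick shape check for the block above (the import path is hypothetical): with pooling enabled the block halves the spatial size, while padding='same' otherwise preserves it.

import torch
from clnet import CrossLinkBlock  # hypothetical import path

block = CrossLinkBlock(in_channels=16, out_channels=24, kernel_size=(5, 3), pool_enable=True)
x = torch.randn(2, 16, 32, 32)
print(block(x).shape)  # torch.Size([2, 24, 16, 16])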
logger.rs | use opentelemetry::{
global,
sdk::{propagation::TraceContextPropagator, trace::Config, Resource},
KeyValue,
};
use opentelemetry_otlp::WithExportConfig;
use tracing::{dispatcher::SetGlobalDefaultError, subscriber};
use tracing_subscriber::{layer::SubscriberExt, registry::LookupSpan, EnvFilter, FmtSubscriber};
use crate::LogFormat;
type LoggerResult<T> = Result<T, SetGlobalDefaultError>;
/// An installer for a global logger.
#[derive(Debug, Clone)]
pub struct Logger<'a> {
service_name: &'static str,
log_format: LogFormat,
enable_telemetry: bool,
log_queries: bool,
telemetry_endpoint: Option<&'a str>,
}
impl<'a> Logger<'a> {
/// Initialize a new global logger installer.
pub fn | (service_name: &'static str) -> Self {
Self {
service_name,
log_format: LogFormat::Json,
enable_telemetry: false,
log_queries: false,
telemetry_endpoint: None,
}
}
/// Sets the STDOUT log output format. Default: Json.
pub fn log_format(&mut self, log_format: LogFormat) {
self.log_format = log_format;
}
/// Enable query logging. Default: false.
pub fn log_queries(&mut self, log_queries: bool) {
self.log_queries = log_queries;
}
/// Enables OpenTelemetry tracing (exported over OTLP). Default: false.
pub fn enable_telemetry(&mut self, enable_telemetry: bool) {
self.enable_telemetry = enable_telemetry;
}
/// Sets a custom telemetry endpoint (default: http://localhost:4317)
pub fn telemetry_endpoint(&mut self, endpoint: &'a str) {
self.telemetry_endpoint = Some(endpoint);
}
/// Install logger as a global. Can be called only once per application
/// instance. The returned guard value needs to stay in scope for the whole
/// lifetime of the service.
pub fn install(self) -> LoggerResult<()> {
let mut filter = EnvFilter::from_default_env()
.add_directive("tide=error".parse().unwrap())
.add_directive("tonic=error".parse().unwrap())
.add_directive("h2=error".parse().unwrap())
.add_directive("hyper=error".parse().unwrap())
.add_directive("tower=error".parse().unwrap());
if self.log_queries {
filter = filter.add_directive("quaint[{is_query}]=trace".parse().unwrap());
}
match self.log_format {
LogFormat::Text => {
if self.enable_telemetry {
let subscriber = FmtSubscriber::builder()
.with_env_filter(filter.add_directive("trace".parse().unwrap()))
.finish();
self.finalize(subscriber)
} else {
let subscriber = FmtSubscriber::builder().with_env_filter(filter).finish();
self.finalize(subscriber)
}
}
LogFormat::Json => {
let subscriber = FmtSubscriber::builder().json().with_env_filter(filter).finish();
self.finalize(subscriber)
}
}
}
fn finalize<T>(self, subscriber: T) -> LoggerResult<()>
where
T: SubscriberExt + Send + Sync + 'static + for<'span> LookupSpan<'span>,
{
if self.enable_telemetry {
global::set_text_map_propagator(TraceContextPropagator::new());
// Set the service.name resource attribute so tracing backends (e.g. Jaeger) label spans with the service.
let resource = Resource::new(vec![KeyValue::new("service.name", self.service_name)]);
let config = Config::default().with_resource(resource);
let mut builder = opentelemetry_otlp::new_pipeline().tracing().with_trace_config(config);
let mut exporter = opentelemetry_otlp::new_exporter().tonic();
if let Some(endpoint) = self.telemetry_endpoint {
exporter = exporter.with_endpoint(endpoint);
}
builder = builder.with_exporter(exporter);
// TODO: Use async batch exporter
let tracer = builder.install_batch(opentelemetry::runtime::AsyncStd).unwrap();
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
subscriber::set_global_default(subscriber.with(telemetry))?;
Ok(())
} else {
subscriber::set_global_default(subscriber)?;
Ok(())
}
}
}
| new |
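A hedged usage sketch of the installer above; the module path and service name are assumptions, everything else follows the API in the file.

use crate::logger::Logger; // hypothetical module path
use crate::LogFormat;

fn init_logging() -> Result<(), tracing::dispatcher::SetGlobalDefaultError> {
    let mut logger = Logger::new("query-engine");
    logger.log_format(LogFormat::Text);
    logger.log_queries(true);
    // install() consumes the builder and sets the global subscriber.
    logger.install()
}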
plugin.py | """A pytest plugin which helps testing Django applications
This plugin handles creating and destroying the test environment and
test database and provides some useful text fixtures.
"""
import contextlib
import inspect
from functools import reduce
import os
import pathlib
import sys
import pytest
from .django_compat import is_django_unittest # noqa
from .fixtures import django_assert_num_queries # noqa
from .fixtures import django_assert_max_num_queries # noqa
from .fixtures import django_db_setup # noqa
from .fixtures import django_db_use_migrations # noqa
from .fixtures import django_db_keepdb # noqa
from .fixtures import django_db_createdb # noqa
from .fixtures import django_db_modify_db_settings # noqa
from .fixtures import django_db_modify_db_settings_parallel_suffix # noqa
from .fixtures import django_db_modify_db_settings_tox_suffix # noqa
from .fixtures import django_db_modify_db_settings_xdist_suffix # noqa
from .fixtures import _live_server_helper # noqa
from .fixtures import admin_client # noqa
from .fixtures import admin_user # noqa
from .fixtures import async_client # noqa
from .fixtures import client # noqa
from .fixtures import db # noqa
from .fixtures import django_user_model # noqa
from .fixtures import django_username_field # noqa
from .fixtures import live_server # noqa
from .fixtures import django_db_reset_sequences # noqa
from .fixtures import async_rf # noqa
from .fixtures import rf # noqa
from .fixtures import settings # noqa
from .fixtures import transactional_db # noqa
from .lazy_django import django_settings_is_configured, skip_if_no_django
SETTINGS_MODULE_ENV = "DJANGO_SETTINGS_MODULE"
CONFIGURATION_ENV = "DJANGO_CONFIGURATION"
INVALID_TEMPLATE_VARS_ENV = "FAIL_INVALID_TEMPLATE_VARS"
_report_header = []
# ############### pytest hooks ################
def pytest_addoption(parser):
group = parser.getgroup("django")
group.addoption(
"--reuse-db",
action="store_true",
dest="reuse_db",
default=False,
help="Re-use the testing database if it already exists, "
"and do not remove it when the test finishes.",
)
group.addoption(
"--create-db",
action="store_true",
dest="create_db",
default=False,
help="Re-create the database, even if it exists. This "
"option can be used to override --reuse-db.",
)
group.addoption(
"--ds",
action="store",
type=str,
dest="ds",
default=None,
help="Set DJANGO_SETTINGS_MODULE.",
)
group.addoption(
"--dc",
action="store",
type=str,
dest="dc",
default=None,
help="Set DJANGO_CONFIGURATION.",
)
group.addoption(
"--nomigrations",
"--no-migrations",
action="store_true",
dest="nomigrations",
default=False,
help="Disable Django migrations on test setup",
)
group.addoption(
"--migrations",
action="store_false",
dest="nomigrations",
default=False,
help="Enable Django migrations on test setup",
)
parser.addini(
CONFIGURATION_ENV, "django-configurations class to use by pytest-django."
)
group.addoption(
"--liveserver",
default=None,
help="Address and port for the live_server fixture.",
)
parser.addini(
SETTINGS_MODULE_ENV, "Django settings module to use by pytest-django."
)
parser.addini(
"django_find_project",
"Automatically find and add a Django project to the " "Python path.",
type="bool",
default=True,
)
parser.addini(
"django_debug_mode",
"How to set the Django DEBUG setting (default `False`). "
"Use `keep` to not override.",
default="False",
)
group.addoption(
"--fail-on-template-vars",
action="store_true",
dest="itv",
default=False,
help="Fail for invalid variables in templates.",
)
parser.addini(
INVALID_TEMPLATE_VARS_ENV,
"Fail for invalid variables in templates.",
type="bool",
default=False,
)
PROJECT_FOUND = (
"pytest-django found a Django project in %s "
"(it contains manage.py) and added it to the Python path.\n"
'If this is wrong, add "django_find_project = false" to '
"pytest.ini and explicitly manage your Python path."
)
PROJECT_NOT_FOUND = (
"pytest-django could not find a Django project "
"(no manage.py file could be found). You must "
"explicitly add your Django project to the Python path "
"to have it picked up."
)
PROJECT_SCAN_DISABLED = (
"pytest-django did not search for Django "
"projects since it is disabled in the configuration "
'("django_find_project = false")'
)
@contextlib.contextmanager
def _handle_import_error(extra_message):
try:
yield
except ImportError as e:
django_msg = (e.args[0] + "\n\n") if e.args else ""
msg = django_msg + extra_message
raise ImportError(msg)
def _add_django_project_to_path(args):
def is_django_project(path):
try:
return path.is_dir() and (path / "manage.py").exists()
except OSError:
return False
def arg_to_path(arg):
# Test classes or functions can be appended to paths separated by ::
arg = arg.split("::", 1)[0]
return pathlib.Path(arg)
def find_django_path(args):
args = map(str, args)
args = [arg_to_path(x) for x in args if not x.startswith("-")]
cwd = pathlib.Path.cwd()
if not args:
args.append(cwd)
elif cwd not in args:
args.append(cwd)
for arg in args:
if is_django_project(arg):
return arg
for parent in arg.parents:
if is_django_project(parent):
return parent
return None
project_dir = find_django_path(args)
if project_dir:
sys.path.insert(0, str(project_dir.absolute()))
return PROJECT_FOUND % project_dir
return PROJECT_NOT_FOUND
def _setup_django():
if "django" not in sys.modules:
return
import django.conf
# Avoid force-loading Django when settings are not properly configured.
if not django.conf.settings.configured:
return
import django.apps
if not django.apps.apps.ready:
django.setup()
_blocking_manager.block()
def _get_boolean_value(x, name, default=None):
if x is None:
return default
if x in (True, False):
return x
possible_values = {"true": True, "false": False, "1": True, "0": False}
try:
return possible_values[x.lower()]
except KeyError:
raise ValueError(
"{} is not a valid value for {}. "
"It must be one of {}.".format(x, name, ", ".join(possible_values.keys()))
)
def pytest_load_initial_conftests(early_config, parser, args):
# Register the marks
early_config.addinivalue_line(
"markers",
"django_db(transaction=False): Mark the test as using "
"the Django test database. The *transaction* argument marks will "
"allow you to use real transactions in the test like Django's "
"TransactionTestCase.",
)
early_config.addinivalue_line(
"markers",
"urls(modstr): Use a different URLconf for this test, similar to "
"the `urls` attribute of Django's `TestCase` objects. *modstr* is "
"a string specifying the module of a URL config, e.g. "
'"my_app.test_urls".',
)
early_config.addinivalue_line(
"markers",
"ignore_template_errors(): ignore errors from invalid template "
"variables (if --fail-on-template-vars is used).",
)
options = parser.parse_known_args(args)
if options.version or options.help:
return
django_find_project = _get_boolean_value(
early_config.getini("django_find_project"), "django_find_project"
)
if django_find_project:
_django_project_scan_outcome = _add_django_project_to_path(args)
else:
_django_project_scan_outcome = PROJECT_SCAN_DISABLED
if (
options.itv
or _get_boolean_value(
os.environ.get(INVALID_TEMPLATE_VARS_ENV), INVALID_TEMPLATE_VARS_ENV
)
or early_config.getini(INVALID_TEMPLATE_VARS_ENV)
):
os.environ[INVALID_TEMPLATE_VARS_ENV] = "true"
def _get_option_with_source(option, envname):
if option:
return option, "option"
if envname in os.environ:
return os.environ[envname], "env"
cfgval = early_config.getini(envname)
if cfgval:
return cfgval, "ini"
return None, None
ds, ds_source = _get_option_with_source(options.ds, SETTINGS_MODULE_ENV)
dc, dc_source = _get_option_with_source(options.dc, CONFIGURATION_ENV)
if ds:
_report_header.append("settings: {} (from {})".format(ds, ds_source))
os.environ[SETTINGS_MODULE_ENV] = ds
if dc:
_report_header.append("configuration: {} (from {})".format(dc, dc_source))
os.environ[CONFIGURATION_ENV] = dc
# Install the django-configurations importer
import configurations.importer
configurations.importer.install()
# Forcefully load Django settings, throws ImportError or
# ImproperlyConfigured if settings cannot be loaded.
from django.conf import settings as dj_settings
with _handle_import_error(_django_project_scan_outcome):
dj_settings.DATABASES
_setup_django()
def pytest_report_header():
if _report_header:
return ["django: " + ", ".join(_report_header)]
@pytest.hookimpl(trylast=True)
def pytest_configure():
# Allow Django settings to be configured in a user pytest_configure call,
# but make sure we call django.setup()
_setup_django()
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(items):
# If Django is not configured we don't need to bother
|
@pytest.fixture(autouse=True, scope="session")
def django_test_environment(request):
"""
Ensure that Django is loaded and has its testing environment setup.
XXX It is a little dodgy that this is an autouse fixture. Perhaps
an email fixture should be requested in order to be able to
use the Django email machinery just like you need to request a
db fixture for access to the Django database, etc. But
without duplicating a lot more of Django's test support code
we need to follow this model.
"""
if django_settings_is_configured():
_setup_django()
from django.test.utils import setup_test_environment, teardown_test_environment
debug_ini = request.config.getini("django_debug_mode")
if debug_ini == "keep":
debug = None
else:
debug = _get_boolean_value(debug_ini, "django_debug_mode", False)
setup_test_environment(debug=debug)
request.addfinalizer(teardown_test_environment)
@pytest.fixture(scope="session")
def django_db_blocker():
"""Wrapper around Django's database access.
This object can be used to re-enable database access. This fixture is used
internally in pytest-django to build the other fixtures and can be used for
special database handling.
The object is a context manager and provides the methods
.unblock()/.block() and .restore() to temporarily enable database access.
This is an advanced feature that is meant to be used to implement database
fixtures.
"""
if not django_settings_is_configured():
return None
return _blocking_manager
@pytest.fixture(autouse=True)
def _django_db_marker(request):
"""Implement the django_db marker, internal to pytest-django.
This will dynamically request the ``db``, ``transactional_db`` or
``django_db_reset_sequences`` fixtures as required by the django_db marker.
"""
marker = request.node.get_closest_marker("django_db")
if marker:
transaction, reset_sequences = validate_django_db(marker)
if reset_sequences:
request.getfixturevalue("django_db_reset_sequences")
elif transaction:
request.getfixturevalue("transactional_db")
else:
request.getfixturevalue("db")
@pytest.fixture(autouse=True, scope="class")
def _django_setup_unittest(request, django_db_blocker):
"""Setup a django unittest, internal to pytest-django."""
if not django_settings_is_configured() or not is_django_unittest(request):
yield
return
# Fix/patch pytest.
# Before pytest 5.4: https://github.com/pytest-dev/pytest/issues/5991
# After pytest 5.4: https://github.com/pytest-dev/pytest-django/issues/824
from _pytest.unittest import TestCaseFunction
original_runtest = TestCaseFunction.runtest
def non_debugging_runtest(self):
self._testcase(result=self)
try:
TestCaseFunction.runtest = non_debugging_runtest
request.getfixturevalue("django_db_setup")
with django_db_blocker.unblock():
yield
finally:
TestCaseFunction.runtest = original_runtest
@pytest.fixture(scope="function", autouse=True)
def _dj_autoclear_mailbox():
if not django_settings_is_configured():
return
from django.core import mail
del mail.outbox[:]
@pytest.fixture(scope="function")
def mailoutbox(django_mail_patch_dns, _dj_autoclear_mailbox):
if not django_settings_is_configured():
return
from django.core import mail
return mail.outbox
@pytest.fixture(scope="function")
def django_mail_patch_dns(monkeypatch, django_mail_dnsname):
from django.core import mail
monkeypatch.setattr(mail.message, "DNS_NAME", django_mail_dnsname)
@pytest.fixture(scope="function")
def django_mail_dnsname():
return "fake-tests.example.com"
@pytest.fixture(autouse=True, scope="function")
def _django_set_urlconf(request):
"""Apply the @pytest.mark.urls marker, internal to pytest-django."""
marker = request.node.get_closest_marker("urls")
if marker:
skip_if_no_django()
import django.conf
from django.urls import clear_url_caches, set_urlconf
urls = validate_urls(marker)
original_urlconf = django.conf.settings.ROOT_URLCONF
django.conf.settings.ROOT_URLCONF = urls
clear_url_caches()
set_urlconf(None)
def restore():
django.conf.settings.ROOT_URLCONF = original_urlconf
# Copy the pattern from
# https://github.com/django/django/blob/master/django/test/signals.py#L152
clear_url_caches()
set_urlconf(None)
request.addfinalizer(restore)
@pytest.fixture(autouse=True, scope="session")
def _fail_for_invalid_template_variable():
"""Fixture that fails for invalid variables in templates.
This fixture will fail each test that uses django template rendering
should a template contain an invalid template variable.
The fail message will include the name of the invalid variable and
in most cases the template name.
It does not raise an exception, but fails, as the stack trace doesn't
offer any helpful information to debug.
This behavior can be switched off using the marker:
``pytest.mark.ignore_template_errors``
"""
class InvalidVarException:
"""Custom handler for invalid strings in templates."""
def __init__(self):
self.fail = True
def __contains__(self, key):
return key == "%s"
@staticmethod
def _get_origin():
stack = inspect.stack()
# Try to use topmost `self.origin` first (Django 1.9+, and with
# TEMPLATE_DEBUG).
for f in stack[2:]:
func = f[3]
if func == "render":
frame = f[0]
try:
origin = frame.f_locals["self"].origin
except (AttributeError, KeyError):
continue
if origin is not None:
return origin
from django.template import Template
# finding the ``render`` needle in the stack
frame = reduce(
lambda x, y: y[3] == "render" and "base.py" in y[1] and y or x, stack
)
# assert 0, stack
frame = frame[0]
# finding only the frame locals in all frame members
f_locals = reduce(
lambda x, y: y[0] == "f_locals" and y or x, inspect.getmembers(frame)
)[1]
# ``django.template.base.Template``
template = f_locals["self"]
if isinstance(template, Template):
return template.name
def __mod__(self, var):
origin = self._get_origin()
if origin:
msg = "Undefined template variable '{}' in '{}'".format(var, origin)
else:
msg = "Undefined template variable '%s'" % var
if self.fail:
pytest.fail(msg)
else:
return msg
if (
os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true"
and django_settings_is_configured()
):
from django.conf import settings as dj_settings
if dj_settings.TEMPLATES:
dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"] = InvalidVarException()
@pytest.fixture(autouse=True)
def _template_string_if_invalid_marker(request):
"""Apply the @pytest.mark.ignore_template_errors marker,
internal to pytest-django."""
marker = request.keywords.get("ignore_template_errors", None)
if os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true":
if marker and django_settings_is_configured():
from django.conf import settings as dj_settings
if dj_settings.TEMPLATES:
dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"].fail = False
@pytest.fixture(autouse=True, scope="function")
def _django_clear_site_cache():
"""Clears ``django.contrib.sites.models.SITE_CACHE`` to avoid
unexpected behavior with cached site objects.
"""
if django_settings_is_configured():
from django.conf import settings as dj_settings
if "django.contrib.sites" in dj_settings.INSTALLED_APPS:
from django.contrib.sites.models import Site
Site.objects.clear_cache()
# ############### Helper Functions ################
class _DatabaseBlockerContextManager:
def __init__(self, db_blocker):
self._db_blocker = db_blocker
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
self._db_blocker.restore()
class _DatabaseBlocker:
"""Manager for django.db.backends.base.base.BaseDatabaseWrapper.
This is the object returned by django_db_blocker.
"""
def __init__(self):
self._history = []
self._real_ensure_connection = None
@property
def _dj_db_wrapper(self):
from django.db.backends.base.base import BaseDatabaseWrapper
# The first time the _dj_db_wrapper is accessed, we will save a
# reference to the real implementation.
if self._real_ensure_connection is None:
self._real_ensure_connection = BaseDatabaseWrapper.ensure_connection
return BaseDatabaseWrapper
def _save_active_wrapper(self):
return self._history.append(self._dj_db_wrapper.ensure_connection)
def _blocking_wrapper(*args, **kwargs):
__tracebackhide__ = True
__tracebackhide__ # Silence pyflakes
raise RuntimeError(
"Database access not allowed, "
'use the "django_db" mark, or the '
'"db" or "transactional_db" fixtures to enable it.'
)
def unblock(self):
"""Enable access to the Django database."""
self._save_active_wrapper()
self._dj_db_wrapper.ensure_connection = self._real_ensure_connection
return _DatabaseBlockerContextManager(self)
def block(self):
"""Disable access to the Django database."""
self._save_active_wrapper()
self._dj_db_wrapper.ensure_connection = self._blocking_wrapper
return _DatabaseBlockerContextManager(self)
def restore(self):
self._dj_db_wrapper.ensure_connection = self._history.pop()
_blocking_manager = _DatabaseBlocker()
def validate_django_db(marker):
"""Validate the django_db marker.
It checks the signature and creates the ``transaction`` and
``reset_sequences`` attributes on the marker which will have the
correct values.
A sequence reset is only allowed when combined with a transaction.
"""
def apifun(transaction=False, reset_sequences=False):
return transaction, reset_sequences
return apifun(*marker.args, **marker.kwargs)
def validate_urls(marker):
"""Validate the urls marker.
It checks the signature and creates the `urls` attribute on the
marker which will have the correct value.
"""
def apifun(urls):
return urls
return apifun(*marker.args, **marker.kwargs)
| if not django_settings_is_configured():
return
from django.test import TestCase, TransactionTestCase
def get_order_number(test):
if hasattr(test, "cls") and test.cls:
# Beware, TestCase is a subclass of TransactionTestCase
if issubclass(test.cls, TestCase):
return 0
if issubclass(test.cls, TransactionTestCase):
return 1
marker_db = test.get_closest_marker('django_db')
if marker_db:
transaction = validate_django_db(marker_db)[0]
if transaction is True:
return 1
else:
transaction = None
fixtures = getattr(test, 'fixturenames', [])
if "transactional_db" in fixtures:
return 1
if transaction is False:
return 0
if "db" in fixtures:
return 0
return 2
items[:] = sorted(items, key=get_order_number) |
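To illustrate the ordering hook above: collected together, the hedged tests below run with the plain db test first (bucket 0), the transactional one second (bucket 1), and the database-free one last (bucket 2).

import pytest

@pytest.mark.django_db
def test_fast_rollback(django_user_model):
    assert django_user_model.objects.count() == 0  # bucket 0

@pytest.mark.django_db(transaction=True)
def test_real_transactions(django_user_model):
    assert django_user_model.objects.count() == 0  # bucket 1

def test_no_database():
    assert 1 + 1 == 2  # bucket 2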
api.go | package chat
import (
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"time"
"github.com/gin-gonic/gin"
)
func GetServerStatus(c *gin.Context) {
c.JSON(200, gin.H{
"status": "server status id good",
})
}
func GetRunTestResult(c *gin.Context) {
_, session, _ := InitApi()
// Generate fresh test data for this run.
chatData := GenerateChatData()
//insert test data
InsertData(session, &chatData)
//select insert data
SelectTestData(session, &chatData)
//select all data at chat table
result := AllSelectData(session)
c.String(http.StatusOK, "Test done.", result)
}
func GetHTMLPage(c *gin.Context) {
env, _, _ := InitApi()
f, err := os.Open("./web/livechat.html")
if err != nil {
log.Println("file read error", err)
c.String(http.StatusInternalServerError, "page unavailable")
return
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
log.Fatal("html file read error: ", err)
}
//html static http://localhost:8080/
// if public endpoint
ApiEndpoint := "http://localhost:" + env.AppPort + "/"
rep := regexp.MustCompile(`http://localhost:8080/`)
// local env check
localCheck := regexp.MustCompile(`localhost|127.0.0.1`)
if !localCheck.MatchString(env.AppEndpoint) {
ApiEndpoint = "https://" + env.AppEndpoint + "/"
}
str := rep.ReplaceAllString(string(b), ApiEndpoint)
c.Header("Content-Type", "text/html")
c.Header("Access-Control-Allow-Origin", "*")
c.String(http.StatusOK, str)
}
func PostInsertChatData(c *gin.Context) {
_, session, _ := InitApi()
defer session.Close()
var json Comment
if err := c.ShouldBindJSON(&json); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
postData := Comment{
Name: json.Name,
Time: time.Now().UnixNano(),
Chatroom: "room-oranie",
Comment: json.Comment,
}
log.Printf("%v", json)
resp := InsertData(session, &postData)
c.JSON(http.StatusOK, resp)
}
func GetLatestChatData(c *gin.Context) |
func GetAllChatData(c *gin.Context) {
_, session, _ := InitApi()
chatroom := "room-oranie"
chatData := ChatroomAllData(session, chatroom)
comments := Comments{Response: chatData}
log.Println("all data :", comments)
c.JSON(http.StatusOK, comments)
}
| {
_, session, _ := InitApi()
chatroom := "room-oranie"
chatData := ChatroomLatestData(session, chatroom)
comments := Comments{Response: chatData}
log.Println("latest data :", comments)
c.JSON(http.StatusOK, comments)
} |
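A hedged sketch of mounting the handlers above; the route paths and the SetupRouter name are illustrative, not taken from the repository.

func SetupRouter() *gin.Engine {
	r := gin.Default()
	r.GET("/status", GetServerStatus)
	r.GET("/", GetHTMLPage)
	r.GET("/chat/latest", GetLatestChatData)
	r.GET("/chat/all", GetAllChatData)
	r.POST("/chat", PostInsertChatData)
	return r
}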
servicemanagement-gen.go | // Copyright 2021 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated file. DO NOT EDIT.
// Package servicemanagement provides access to the Service Management API.
//
// For product documentation, see: https://cloud.google.com/service-management/
//
// Creating a client
//
// Usage example:
//
// import "google.golang.org/api/servicemanagement/v1"
// ...
// ctx := context.Background()
// servicemanagementService, err := servicemanagement.NewService(ctx)
//
// In this example, Google Application Default Credentials are used for authentication.
//
// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// Other authentication options
//
// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes:
//
// servicemanagementService, err := servicemanagement.NewService(ctx, option.WithScopes(servicemanagement.ServiceManagementReadonlyScope))
//
// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
//
// servicemanagementService, err := servicemanagement.NewService(ctx, option.WithAPIKey("AIza..."))
//
// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// servicemanagementService, err := servicemanagement.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
// See https://godoc.org/google.golang.org/api/option/ for details on options.
package servicemanagement // import "google.golang.org/api/servicemanagement/v1"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
googleapi "google.golang.org/api/googleapi"
gensupport "google.golang.org/api/internal/gensupport"
option "google.golang.org/api/option"
internaloption "google.golang.org/api/option/internaloption"
htransport "google.golang.org/api/transport/http"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = internaloption.WithDefaultEndpoint
const apiId = "servicemanagement:v1"
const apiName = "servicemanagement"
const apiVersion = "v1"
const basePath = "https://servicemanagement.googleapis.com/"
const mtlsBasePath = "https://servicemanagement.mtls.googleapis.com/"
// OAuth2 scopes used by this API.
const (
// See, edit, configure, and delete your Google Cloud data and see the
// email address for your Google Account.
CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
// View your data across Google Cloud services and see the email address
// of your Google Account
CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only"
// Manage your Google API service configuration
ServiceManagementScope = "https://www.googleapis.com/auth/service.management"
// View your Google API service configuration
ServiceManagementReadonlyScope = "https://www.googleapis.com/auth/service.management.readonly"
)
// NewService creates a new APIService.
func NewService(ctx context.Context, opts ...option.ClientOption) (*APIService, error) {
scopesOption := option.WithScopes(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
"https://www.googleapis.com/auth/service.management",
"https://www.googleapis.com/auth/service.management.readonly",
)
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
s, err := New(client)
if err != nil {
return nil, err
}
if endpoint != "" {
s.BasePath = endpoint
}
return s, nil
}
// New creates a new APIService. It uses the provided http.Client for requests.
//
// Deprecated: please use NewService instead.
// To provide a custom HTTP client, use option.WithHTTPClient.
// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
func New(client *http.Client) (*APIService, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &APIService{client: client, BasePath: basePath}
s.Operations = NewOperationsService(s)
s.Services = NewServicesService(s)
return s, nil
}
type APIService struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
Operations *OperationsService
Services *ServicesService
}
func (s *APIService) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewOperationsService(s *APIService) *OperationsService {
rs := &OperationsService{s: s}
return rs
}
type OperationsService struct {
s *APIService
}
func NewServicesService(s *APIService) *ServicesService {
rs := &ServicesService{s: s}
rs.Configs = NewServicesConfigsService(s)
rs.Consumers = NewServicesConsumersService(s)
rs.Rollouts = NewServicesRolloutsService(s)
return rs
}
type ServicesService struct {
s *APIService
Configs *ServicesConfigsService
Consumers *ServicesConsumersService
Rollouts *ServicesRolloutsService
}
func | (s *APIService) *ServicesConfigsService {
rs := &ServicesConfigsService{s: s}
return rs
}
type ServicesConfigsService struct {
s *APIService
}
func NewServicesConsumersService(s *APIService) *ServicesConsumersService {
rs := &ServicesConsumersService{s: s}
return rs
}
type ServicesConsumersService struct {
s *APIService
}
func NewServicesRolloutsService(s *APIService) *ServicesRolloutsService {
rs := &ServicesRolloutsService{s: s}
return rs
}
type ServicesRolloutsService struct {
s *APIService
}
// Advice: Generated advice about this change, used for providing more
// information about how a change will affect the existing service.
type Advice struct {
// Description: Useful description for why this advice was applied and
// what actions should be taken to mitigate any implied risks.
Description string `json:"description,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Advice) MarshalJSON() ([]byte, error) {
type NoMethod Advice
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
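// Hedged illustration of the ForceSendFields/NullFields convention described
// above (the values are made up): to send an explicitly empty description in
// a Patch request rather than omitting the field, list its name:
//
//	adv := &Advice{Description: ""}
//	adv.ForceSendFields = []string{"Description"}
//	data, _ := adv.MarshalJSON() // yields {"description":""}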
// Api: Api is a light-weight descriptor for an API Interface.
// Interfaces are also described as "protocol buffer services" in some
// contexts, such as by the "service" keyword in a .proto file, but they
// are different from API Services, which represent a concrete
// implementation of an interface as opposed to simply a description of
// methods and bindings. They are also sometimes simply referred to as
// "APIs" in other contexts, such as the name of this message itself.
// See https://cloud.google.com/apis/design/glossary for detailed
// terminology.
type Api struct {
// Methods: The methods of this interface, in unspecified order.
Methods []*Method `json:"methods,omitempty"`
// Mixins: Included interfaces. See Mixin.
Mixins []*Mixin `json:"mixins,omitempty"`
// Name: The fully qualified name of this interface, including package
// name followed by the interface's simple name.
Name string `json:"name,omitempty"`
// Options: Any metadata attached to the interface.
Options []*Option `json:"options,omitempty"`
// SourceContext: Source context for the protocol buffer service
// represented by this message.
SourceContext *SourceContext `json:"sourceContext,omitempty"`
// Syntax: The source syntax of the service.
//
// Possible values:
// "SYNTAX_PROTO2" - Syntax `proto2`.
// "SYNTAX_PROTO3" - Syntax `proto3`.
Syntax string `json:"syntax,omitempty"`
// Version: A version string for this interface. If specified, must have
// the form `major-version.minor-version`, as in `1.10`. If the minor
// version is omitted, it defaults to zero. If the entire version field
// is empty, the major version is derived from the package name, as
// outlined below. If the field is not empty, the version in the package
// name will be verified to be consistent with what is provided here.
// The versioning schema uses semantic versioning (http://semver.org)
// where the major version number indicates a breaking change and the
// minor version an additive, non-breaking change. Both version numbers
// are signals to users what to expect from different versions, and
// should be carefully chosen based on the product plan. The major
// version is also reflected in the package name of the interface, which
// must end in `v`, as in `google.feature.v1`. For major versions 0 and
// 1, the suffix can be omitted. Zero major versions must only be used
// for experimental, non-GA interfaces.
Version string `json:"version,omitempty"`
// ForceSendFields is a list of field names (e.g. "Methods") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Methods") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Api) MarshalJSON() ([]byte, error) {
type NoMethod Api
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// AuditConfig: Specifies the audit configuration for a service. The
// configuration determines which permission types are logged, and what
// identities, if any, are exempted from logging. An AuditConfig must
// have one or more AuditLogConfigs. If there are AuditConfigs for both
// `allServices` and a specific service, the union of the two
// AuditConfigs is used for that service: the log_types specified in
// each AuditConfig are enabled, and the exempted_members in each
// AuditLogConfig are exempted. Example Policy with multiple
// AuditConfigs: { "audit_configs": [ { "service": "allServices",
// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members":
// [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" }, {
// "log_type": "ADMIN_READ" } ] }, { "service":
// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type":
// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [
// "user:[email protected]" ] } ] } ] } For sampleservice, this policy
// enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts
// [email protected] from DATA_READ logging, and [email protected] from
// DATA_WRITE logging.
type AuditConfig struct {
// AuditLogConfigs: The configuration for logging of each type of
// permission.
AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"`
// Service: Specifies a service that will be enabled for audit logging.
// For example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
// `allServices` is a special value that covers all services.
Service string `json:"service,omitempty"`
// ForceSendFields is a list of field names (e.g. "AuditLogConfigs") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AuditLogConfigs") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *AuditConfig) MarshalJSON() ([]byte, error) {
type NoMethod AuditConfig
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
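// Illustrative sketch only (hypothetical helper): the example policy from the
// AuditConfig comment above, expressed as Go values instead of JSON.
func exampleAuditConfigs() []*AuditConfig {
	return []*AuditConfig{
		{
			Service: "allServices",
			AuditLogConfigs: []*AuditLogConfig{
				{LogType: "DATA_READ", ExemptedMembers: []string{"user:[email protected]"}},
				{LogType: "DATA_WRITE"},
				{LogType: "ADMIN_READ"},
			},
		},
		{
			Service: "sampleservice.googleapis.com",
			AuditLogConfigs: []*AuditLogConfig{
				{LogType: "DATA_READ"},
				{LogType: "DATA_WRITE", ExemptedMembers: []string{"user:[email protected]"}},
			},
		},
	}
}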
// AuditLogConfig: Provides the configuration for logging a type of
// permissions. Example: { "audit_log_configs": [ { "log_type":
// "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, {
// "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and
// 'DATA_WRITE' logging, while exempting [email protected] from DATA_READ
// logging.
type AuditLogConfig struct {
// ExemptedMembers: Specifies the identities that do not cause logging
// for this type of permission. Follows the same format of
// Binding.members.
ExemptedMembers []string `json:"exemptedMembers,omitempty"`
// LogType: The log type that this config enables.
//
// Possible values:
// "LOG_TYPE_UNSPECIFIED" - Default case. Should never be this.
// "ADMIN_READ" - Admin reads. Example: CloudIAM getIamPolicy
// "DATA_WRITE" - Data writes. Example: CloudSQL Users create
// "DATA_READ" - Data reads. Example: CloudSQL Users list
LogType string `json:"logType,omitempty"`
// ForceSendFields is a list of field names (e.g. "ExemptedMembers") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ExemptedMembers") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *AuditLogConfig) MarshalJSON() ([]byte, error) {
type NoMethod AuditLogConfig
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// AuthProvider: Configuration for an authentication provider, including
// support for JSON Web Token (JWT)
// (https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
type AuthProvider struct {
// Audiences: The list of JWT audiences
// (https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3)
// that are allowed to access. A JWT containing any of these audiences
// will be accepted. When this setting is absent, JWTs with audiences: -
// "https://[service.name]/[google.protobuf.Api.name]" -
// "https://[service.name]/" will be accepted. For example, if no
// audiences are in the setting, LibraryService API will accept JWTs
// with the following audiences: -
// https://library-example.googleapis.com/google.example.library.v1.LibraryService
// - https://library-example.googleapis.com/ Example: audiences:
// bookstore_android.apps.googleusercontent.com,
// bookstore_web.apps.googleusercontent.com
Audiences string `json:"audiences,omitempty"`
// AuthorizationUrl: Redirect URL if a JWT token is required but not
// present or is expired. Implements the authorizationUrl of
// securityDefinitions in the OpenAPI spec.
AuthorizationUrl string `json:"authorizationUrl,omitempty"`
// Id: The unique identifier of the auth provider. It will be referred
// to by `AuthRequirement.provider_id`. Example: "bookstore_auth".
Id string `json:"id,omitempty"`
// Issuer: Identifies the principal that issued the JWT. See
// https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1
// Usually a URL or an email address. Example:
// https://securetoken.google.com Example:
// [email protected]
Issuer string `json:"issuer,omitempty"`
// JwksUri: URL of the provider's public key set to validate signature
// of the JWT. See OpenID Discovery
// (https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
// Optional if the key set document: - can be retrieved from OpenID
// Discovery
// (https://openid.net/specs/openid-connect-discovery-1_0.html) of the
// issuer. - can be inferred from the email domain of the issuer (e.g. a
// Google service account). Example:
// https://www.googleapis.com/oauth2/v1/certs
JwksUri string `json:"jwksUri,omitempty"`
// JwtLocations: Defines the locations to extract the JWT. JWT locations
// can be either from HTTP headers or URL query parameters. The rule is
// that the first match wins. The checking order is: checking all
// headers first, then URL query parameters. If not specified, the
// following three locations are used by default: 1) Authorization: Bearer
// 2) x-goog-iap-jwt-assertion 3) access_token query parameter The default
// locations can be specified explicitly as follows: jwt_locations: - header:
// Authorization value_prefix: "Bearer " - header:
// x-goog-iap-jwt-assertion - query: access_token
JwtLocations []*JwtLocation `json:"jwtLocations,omitempty"`
// ForceSendFields is a list of field names (e.g. "Audiences") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Audiences") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *AuthProvider) MarshalJSON() ([]byte, error) {
type NoMethod AuthProvider
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
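// Illustrative sketch only: the documented default JWT locations as Go
// values. This assumes JwtLocation (defined elsewhere in this file) exposes
// Header, Query, and ValuePrefix fields matching the YAML keys in the
// JwtLocations comment above.
func exampleDefaultJwtLocations() []*JwtLocation {
	return []*JwtLocation{
		{Header: "Authorization", ValuePrefix: "Bearer "}, // 1) Authorization: Bearer
		{Header: "x-goog-iap-jwt-assertion"},              // 2) IAP assertion header
		{Query: "access_token"},                           // 3) query parameter
	}
}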
// AuthRequirement: User-defined authentication requirements, including
// support for JSON Web Token (JWT)
// (https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
type AuthRequirement struct {
// Audiences: NOTE: This will be deprecated soon, once
// AuthProvider.audiences is implemented and accepted in all the runtime
// components. The list of JWT audiences
// (https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3)
// that are allowed to access. A JWT containing any of these audiences
// will be accepted. When this setting is absent, only JWTs with
// audience "https://Service_name/API_name" will be accepted. For
// example, if no audiences are in the setting, LibraryService API will
// only accept JWTs with the following audience
// "https://library-example.googleapis.com/google.example.library.v1.Libr
// aryService". Example: audiences:
// bookstore_android.apps.googleusercontent.com,
// bookstore_web.apps.googleusercontent.com
Audiences string `json:"audiences,omitempty"`
// ProviderId: The ID of the authentication provider. Example: provider_id:
// bookstore_auth
ProviderId string `json:"providerId,omitempty"`
// ForceSendFields is a list of field names (e.g. "Audiences") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Audiences") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *AuthRequirement) MarshalJSON() ([]byte, error) {
type NoMethod AuthRequirement
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Authentication: `Authentication` defines the authentication
// configuration for API methods provided by an API service. Example:
// name: calendar.googleapis.com authentication: providers: - id:
// google_calendar_auth jwks_uri:
// https://www.googleapis.com/oauth2/v1/certs issuer:
// https://securetoken.google.com rules: - selector: "*" requirements:
// provider_id: google_calendar_auth - selector:
// google.calendar.Delegate oauth: canonical_scopes:
// https://www.googleapis.com/auth/calendar.read
type Authentication struct {
// Providers: Defines a set of authentication providers that a service
// supports.
Providers []*AuthProvider `json:"providers,omitempty"`
// Rules: A list of authentication rules that apply to individual API
// methods. **NOTE:** All service configuration rules follow "last one
// wins" order.
Rules []*AuthenticationRule `json:"rules,omitempty"`
// ForceSendFields is a list of field names (e.g. "Providers") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Providers") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Authentication) MarshalJSON() ([]byte, error) {
type NoMethod Authentication
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
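// Illustrative sketch only (hypothetical helper): the calendar example from
// the Authentication comment above as Go values. The OAuth rule from the
// YAML example is omitted because it adds nothing to the structure shown.
func exampleAuthentication() *Authentication {
	return &Authentication{
		Providers: []*AuthProvider{{
			Id:      "google_calendar_auth",
			JwksUri: "https://www.googleapis.com/oauth2/v1/certs",
			Issuer:  "https://securetoken.google.com",
		}},
		Rules: []*AuthenticationRule{{
			Selector:     "*",
			Requirements: []*AuthRequirement{{ProviderId: "google_calendar_auth"}},
		}},
	}
}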
// AuthenticationRule: Authentication rules for the service. By default,
// if a method has any authentication requirements, every request must
// include a valid credential matching one of the requirements. It's an
// error to include more than one kind of credential in a single
// request. If a method doesn't have any auth requirements, request
// credentials will be ignored.
type AuthenticationRule struct {
// AllowWithoutCredential: If true, the service accepts API keys without
// any other credential. This flag only applies to HTTP and gRPC
// requests.
AllowWithoutCredential bool `json:"allowWithoutCredential,omitempty"`
// Oauth: The requirements for OAuth credentials.
Oauth *OAuthRequirements `json:"oauth,omitempty"`
// Requirements: Requirements for additional authentication providers.
Requirements []*AuthRequirement `json:"requirements,omitempty"`
// Selector: Selects the methods to which this rule applies. Refer to
// selector for syntax details.
Selector string `json:"selector,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "AllowWithoutCredential") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AllowWithoutCredential")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *AuthenticationRule) MarshalJSON() ([]byte, error) {
type NoMethod AuthenticationRule
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Backend: `Backend` defines the backend configuration for a service.
type Backend struct {
// Rules: A list of API backend rules that apply to individual API
// methods. **NOTE:** All service configuration rules follow "last one
// wins" order.
Rules []*BackendRule `json:"rules,omitempty"`
// ForceSendFields is a list of field names (e.g. "Rules") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Rules") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Backend) MarshalJSON() ([]byte, error) {
type NoMethod Backend
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// BackendRule: A backend rule provides configuration for an individual
// API element.
type BackendRule struct {
// Address: The address of the API backend. The scheme is used to
// determine the backend protocol and security. The following schemes
// are accepted:
//
//	SCHEME   PROTOCOL  SECURITY
//	http://  HTTP      None
//	https:// HTTP      TLS
//	grpc://  gRPC      None
//	grpcs:// gRPC      TLS
//
// It is recommended to explicitly include a scheme. Leaving out the
// scheme may cause inconsistent behavior across platforms. If the port
// is unspecified,
// the default is: - 80 for schemes without TLS - 443 for schemes with
// TLS For HTTP backends, use protocol to specify the protocol version.
Address string `json:"address,omitempty"`
// Deadline: The number of seconds to wait for a response from a
// request. The default varies based on the request protocol and
// deployment environment.
Deadline float64 `json:"deadline,omitempty"`
// DisableAuth: When disable_auth is true, a JWT ID token won't be
// generated and the original "Authorization" HTTP header will be
// preserved. If the header is used to carry the original token and is
// expected by the backend, this field must be set to true to preserve
// the header.
DisableAuth bool `json:"disableAuth,omitempty"`
// JwtAudience: The JWT audience is used when generating a JWT ID token
// for the backend. This ID token will be added in the HTTP
// "authorization" header, and sent to the backend.
JwtAudience string `json:"jwtAudience,omitempty"`
// OperationDeadline: The number of seconds to wait for the completion
// of a long running operation. The default is no deadline.
OperationDeadline float64 `json:"operationDeadline,omitempty"`
// PathTranslation: How the request path is combined with the backend
// address; see the examples on each value below.
//
// Possible values:
// "PATH_TRANSLATION_UNSPECIFIED"
// "CONSTANT_ADDRESS" - Use the backend address as-is, with no
// modification to the path. If the URL pattern contains variables, the
// variable names and values will be appended to the query string. If a
// query string parameter and a URL pattern variable have the same name,
// this may result in duplicate keys in the query string. # Examples
// Given the following operation config: Method path:
// /api/company/{cid}/user/{uid} Backend address:
// https://example.cloudfunctions.net/getUser Requests to the following
// request paths will call the backend at the translated path: Request
// path: /api/company/widgetworks/user/johndoe Translated:
// https://example.cloudfunctions.net/getUser?cid=widgetworks&uid=johndoe
// Request path: /api/company/widgetworks/user/johndoe?timezone=EST
// Translated:
// https://example.cloudfunctions.net/getUser?timezone=EST&cid=widgetworks&uid=johndoe
// "APPEND_PATH_TO_ADDRESS" - The request path will be appended to the
// backend address. # Examples Given the following operation config:
// Method path: /api/company/{cid}/user/{uid} Backend address:
// https://example.appspot.com Requests to the following request paths
// will call the backend at the translated path: Request path:
// /api/company/widgetworks/user/johndoe Translated:
// https://example.appspot.com/api/company/widgetworks/user/johndoe
// Request path: /api/company/widgetworks/user/johndoe?timezone=EST
// Translated:
// https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST
PathTranslation string `json:"pathTranslation,omitempty"`
// Protocol: The protocol used for sending a request to the backend. The
// supported values are "http/1.1" and "h2". The default value is
// inferred from the scheme in the address field:
//
//	SCHEME   PROTOCOL
//	http://  http/1.1
//	https:// http/1.1
//	grpc://  h2
//	grpcs:// h2
//
// For secure
// HTTP backends (https://) that support HTTP/2, set this field to "h2"
// for improved performance. Configuring this field to non-default
// values is only supported for secure HTTP backends. This field will be
// ignored for all other backends. See
// https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
// for more details on the supported values.
Protocol string `json:"protocol,omitempty"`
// Selector: Selects the methods to which this rule applies. Refer to
// selector for syntax details.
Selector string `json:"selector,omitempty"`
// ForceSendFields is a list of field names (e.g. "Address") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Address") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *BackendRule) MarshalJSON() ([]byte, error) {
type NoMethod BackendRule
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *BackendRule) UnmarshalJSON(data []byte) error {
type NoMethod BackendRule
var s1 struct {
Deadline gensupport.JSONFloat64 `json:"deadline"`
OperationDeadline gensupport.JSONFloat64 `json:"operationDeadline"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Deadline = float64(s1.Deadline)
s.OperationDeadline = float64(s1.OperationDeadline)
return nil
}
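// Illustrative sketch (not generated code): how the two documented
// PathTranslation modes combine a backend address with a request path,
// following the examples in the comments above. Query handling is
// deliberately naive; the real translation happens server-side.
func exampleTranslatePath(rule *BackendRule, requestPath, query string) string {
	switch rule.PathTranslation {
	case "APPEND_PATH_TO_ADDRESS":
		// e.g. https://example.appspot.com + /api/company/widgetworks/user/johndoe
		addr := rule.Address + requestPath
		if query != "" {
			addr += "?" + query
		}
		return addr
	case "CONSTANT_ADDRESS":
		// URL-pattern variables arrive as query parameters; the caller is
		// assumed to pass them pre-encoded, e.g. "cid=widgetworks&uid=johndoe".
		if query != "" {
			return rule.Address + "?" + query
		}
		return rule.Address
	default:
		return rule.Address
	}
}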
// Billing: Billing related configuration of the service. The following
// example shows how to configure monitored resources and metrics for
// billing, `consumer_destinations` is the only supported destination
// and the monitored resources need at least one label key
// `cloud.googleapis.com/location` to indicate the location of the
// billing usage, using different monitored resources between monitoring
// and billing is recommended so they can be evolved independently:
// monitored_resources: - type: library.googleapis.com/billing_branch
// labels: - key: cloud.googleapis.com/location description: |
// Predefined label to support billing location restriction. - key: city
// description: | Custom label to define the city where the library
// branch is located in. - key: name description: Custom label to define
// the name of the library branch. metrics: - name:
// library.googleapis.com/book/borrowed_count metric_kind: DELTA
// value_type: INT64 unit: "1" billing: consumer_destinations: -
// monitored_resource: library.googleapis.com/billing_branch metrics: -
// library.googleapis.com/book/borrowed_count
type Billing struct {
// ConsumerDestinations: Billing configurations for sending metrics to
// the consumer project. There can be multiple consumer destinations per
// service, each one must have a different monitored resource type. A
// metric can be used in at most one consumer destination.
ConsumerDestinations []*BillingDestination `json:"consumerDestinations,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "ConsumerDestinations") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ConsumerDestinations") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Billing) MarshalJSON() ([]byte, error) {
type NoMethod Billing
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
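// Illustrative sketch only: the consumer_destinations portion of the YAML
// example in the Billing comment above, as Go values.
func exampleBilling() *Billing {
	return &Billing{
		ConsumerDestinations: []*BillingDestination{{
			MonitoredResource: "library.googleapis.com/billing_branch",
			Metrics:           []string{"library.googleapis.com/book/borrowed_count"},
		}},
	}
}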
// BillingDestination: Configuration of a specific billing destination
// (currently, only billing against the consumer project is supported).
type BillingDestination struct {
// Metrics: Names of the metrics to report to this billing destination.
// Each name must be defined in Service.metrics section.
Metrics []string `json:"metrics,omitempty"`
// MonitoredResource: The monitored resource type. The type must be
// defined in Service.monitored_resources section.
MonitoredResource string `json:"monitoredResource,omitempty"`
// ForceSendFields is a list of field names (e.g. "Metrics") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Metrics") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *BillingDestination) MarshalJSON() ([]byte, error) {
type NoMethod BillingDestination
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Binding: Associates `members` with a `role`.
type Binding struct {
// Condition: The condition that is associated with this binding. If the
// condition evaluates to `true`, then this binding applies to the
// current request. If the condition evaluates to `false`, then this
// binding does not apply to the current request. However, a different
// role binding might grant the same role to one or more of the members
// in this binding. To learn which resources support conditions in their
// IAM policies, see the IAM documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
Condition *Expr `json:"condition,omitempty"`
// Members: Specifies the identities requesting access for a Cloud
// Platform resource. `members` can have the following values: *
// `allUsers`: A special identifier that represents anyone who is on the
// internet; with or without a Google account. *
// `allAuthenticatedUsers`: A special identifier that represents anyone
// who is authenticated with a Google account or a service account. *
// `user:{emailid}`: An email address that represents a specific Google
// account. For example, `[email protected]`. *
// `serviceAccount:{emailid}`: An email address that represents a
// service account. For example,
// `[email protected]`. * `group:{emailid}`: An
// email address that represents a Google group. For example,
// `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An
// email address (plus unique identifier) representing a user that has
// been recently deleted. For example,
// `[email protected]?uid=123456789012345678901`. If the user is
// recovered, this value reverts to `user:{emailid}` and the recovered
// user retains the role in the binding. *
// `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
// (plus unique identifier) representing a service account that has been
// recently deleted. For example,
// `[email protected]?uid=123456789012345678901`.
// If the service account is undeleted, this value reverts to
// `serviceAccount:{emailid}` and the undeleted service account retains
// the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`:
// An email address (plus unique identifier) representing a Google group
// that has been recently deleted. For example,
// `[email protected]?uid=123456789012345678901`. If the group is
// recovered, this value reverts to `group:{emailid}` and the recovered
// group retains the role in the binding. * `domain:{domain}`: The G
// Suite domain (primary) that represents all the users of that domain.
// For example, `google.com` or `example.com`.
Members []string `json:"members,omitempty"`
// Role: Role that is assigned to `members`. For example,
// `roles/viewer`, `roles/editor`, or `roles/owner`.
Role string `json:"role,omitempty"`
// ForceSendFields is a list of field names (e.g. "Condition") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Condition") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Binding) MarshalJSON() ([]byte, error) {
type NoMethod Binding
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
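// Illustrative sketch only (hypothetical helper): a Binding granting
// roles/viewer to one member of each documented kind.
func exampleBinding() *Binding {
	return &Binding{
		Role: "roles/viewer",
		Members: []string{
			"user:[email protected]",
			"serviceAccount:[email protected]",
			"group:[email protected]",
			"domain:example.com",
		},
	}
}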
// ChangeReport: Change report associated with a particular service
// configuration. It contains a list of ConfigChanges based on the
// comparison between two service configurations.
type ChangeReport struct {
// ConfigChanges: List of changes between two service configurations.
// The changes will be alphabetically sorted based on the identifier of
// each change. A ConfigChange identifier is a dot separated path to the
// configuration. Example:
// visibility.rules[selector='LibraryService.CreateBook'].restriction
ConfigChanges []*ConfigChange `json:"configChanges,omitempty"`
// ForceSendFields is a list of field names (e.g. "ConfigChanges") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ConfigChanges") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ChangeReport) MarshalJSON() ([]byte, error) {
type NoMethod ChangeReport
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ConfigChange: Output generated from semantically comparing two
// versions of a service configuration. Includes detailed information
// about fields that have changed, with applicable advice about the
// potential consequences of the change, such as
// backwards-incompatibility.
type ConfigChange struct {
// Advices: Collection of advice provided for this change, useful for
// determining the possible impact of this change.
Advices []*Advice `json:"advices,omitempty"`
// ChangeType: The type for this change, either ADDED, REMOVED, or
// MODIFIED.
//
// Possible values:
// "CHANGE_TYPE_UNSPECIFIED" - No value was provided.
// "ADDED" - The changed object exists in the 'new' service
// configuration, but not in the 'old' service configuration.
// "REMOVED" - The changed object exists in the 'old' service
// configuration, but not in the 'new' service configuration.
// "MODIFIED" - The changed object exists in both service
// configurations, but its value is different.
ChangeType string `json:"changeType,omitempty"`
// Element: Object hierarchy path to the change, with levels separated
// by a '.' character. For repeated fields, an applicable unique
// identifier field is used for the index (usually selector, name, or
// id). For maps, the term 'key' is used. If the field has no unique
// identifier, the numeric index is used. Examples:
// - visibility.rules[selector=="google.LibraryService.ListBooks"].restriction
// - quota.metric_rules[selector=="google"].metric_costs[key=="reads"].value
// - logging.producer_destinations[0]
Element string `json:"element,omitempty"`
// NewValue: Value of the changed object in the new Service
// configuration, in JSON format. This field will not be populated if
// ChangeType == REMOVED.
NewValue string `json:"newValue,omitempty"`
// OldValue: Value of the changed object in the old Service
// configuration, in JSON format. This field will not be populated if
// ChangeType == ADDED.
OldValue string `json:"oldValue,omitempty"`
// ForceSendFields is a list of field names (e.g. "Advices") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Advices") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ConfigChange) MarshalJSON() ([]byte, error) {
type NoMethod ConfigChange
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
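// Illustrative sketch (hypothetical helper): collecting the element paths of
// all REMOVED changes from a ChangeReport, e.g. to flag potentially breaking
// configuration updates before rollout.
func exampleRemovedElements(r *ChangeReport) []string {
	var removed []string
	for _, c := range r.ConfigChanges {
		if c.ChangeType == "REMOVED" {
			removed = append(removed, c.Element)
		}
	}
	return removed
}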
// ConfigFile: Generic specification of a source configuration file
type ConfigFile struct {
// FileContents: The bytes that constitute the file.
FileContents string `json:"fileContents,omitempty"`
// FilePath: The file name of the configuration file (full or relative
// path).
FilePath string `json:"filePath,omitempty"`
// FileType: The type of configuration file this represents.
//
// Possible values:
// "FILE_TYPE_UNSPECIFIED" - Unknown file type.
// "SERVICE_CONFIG_YAML" - YAML-specification of service.
// "OPEN_API_JSON" - OpenAPI specification, serialized in JSON.
// "OPEN_API_YAML" - OpenAPI specification, serialized in YAML.
// "FILE_DESCRIPTOR_SET_PROTO" - FileDescriptorSet, generated by
// protoc. To generate, use protoc with imports and source info
// included. For an example test.proto file, the following command would
// put the value in a new file named out.pb. $protoc --include_imports
// --include_source_info test.proto -o out.pb
// "PROTO_FILE" - Uncompiled Proto file. Used for storage and display
// purposes only; server-side compilation is currently not supported.
// Should match the inputs to the 'protoc' command used to generate
// FILE_DESCRIPTOR_SET_PROTO. A file of this type can only be included
// if at least one file of type FILE_DESCRIPTOR_SET_PROTO is included.
FileType string `json:"fileType,omitempty"`
// ForceSendFields is a list of field names (e.g. "FileContents") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "FileContents") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ConfigFile) MarshalJSON() ([]byte, error) {
type NoMethod ConfigFile
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ConfigRef: Represents a service configuration with its name and id.
type ConfigRef struct {
// Name: Resource name of a service config. It must have the following
// format: "services/{service name}/configs/{config id}".
Name string `json:"name,omitempty"`
// ForceSendFields is a list of field names (e.g. "Name") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Name") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ConfigRef) MarshalJSON() ([]byte, error) {
type NoMethod ConfigRef
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ConfigSource: Represents a source file which is used to generate the
// service configuration defined by `google.api.Service`.
type ConfigSource struct {
// Files: Set of source configuration files that are used to generate a
// service configuration (`google.api.Service`).
Files []*ConfigFile `json:"files,omitempty"`
// Id: A unique ID for a specific instance of this message, typically
// assigned by the client for tracking purposes. If empty, the server may
// choose to generate one instead.
Id string `json:"id,omitempty"`
// ForceSendFields is a list of field names (e.g. "Files") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Files") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ConfigSource) MarshalJSON() ([]byte, error) {
type NoMethod ConfigSource
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Context: `Context` defines which contexts an API requests. Example:
// context: rules: - selector: "*" requested: -
// google.rpc.context.ProjectContext - google.rpc.context.OriginContext
// The above specifies that all methods in the API request
// `google.rpc.context.ProjectContext` and
// `google.rpc.context.OriginContext`. Available context types are
// defined in package `google.rpc.context`. This also provides a mechanism
// to allowlist any protobuf message extension that can be sent in grpc
// metadata using the "x-goog-ext-<extension_id>-bin" and
// "x-goog-ext-<extension_id>-jspb" formats. For example, list any service
// specific protobuf types that
// can appear in grpc metadata as follows in your yaml file: Example:
// context: rules: - selector:
// "google.example.library.v1.LibraryService.CreateBook"
// allowed_request_extensions: - google.foo.v1.NewExtension
// allowed_response_extensions: - google.foo.v1.NewExtension You can
// also specify extension ID instead of fully qualified extension name
// here.
type Context struct {
// Rules: A list of RPC context rules that apply to individual API
// methods. **NOTE:** All service configuration rules follow "last one
// wins" order.
Rules []*ContextRule `json:"rules,omitempty"`
// ForceSendFields is a list of field names (e.g. "Rules") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Rules") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Context) MarshalJSON() ([]byte, error) {
type NoMethod Context
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ContextRule: A context rule provides information about the context
// for an individual API element.
type ContextRule struct {
// AllowedRequestExtensions: A list of full type names or extension IDs
// of extensions allowed in grpc side channel from client to backend.
AllowedRequestExtensions []string `json:"allowedRequestExtensions,omitempty"`
// AllowedResponseExtensions: A list of full type names or extension IDs
// of extensions allowed in grpc side channel from backend to client.
AllowedResponseExtensions []string `json:"allowedResponseExtensions,omitempty"`
// Provided: A list of full type names of provided contexts.
Provided []string `json:"provided,omitempty"`
// Requested: A list of full type names of requested contexts.
Requested []string `json:"requested,omitempty"`
// Selector: Selects the methods to which this rule applies. Refer to
// selector for syntax details.
Selector string `json:"selector,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "AllowedRequestExtensions") to unconditionally include in API
// requests. By default, fields with empty or default values are omitted
// from API requests. However, any non-pointer, non-interface field
// appearing in ForceSendFields will be sent to the server regardless of
// whether the field is empty or not. This may be used to include empty
// fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AllowedRequestExtensions")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *ContextRule) MarshalJSON() ([]byte, error) {
type NoMethod ContextRule
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Control: Selects and configures the service controller used by the
// service. The service controller handles features like abuse, quota,
// billing, logging, monitoring, etc.
type Control struct {
// Environment: The service control environment to use. If empty, no
// control plane feature (like quota and billing) will be enabled.
Environment string `json:"environment,omitempty"`
// ForceSendFields is a list of field names (e.g. "Environment") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Environment") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Control) MarshalJSON() ([]byte, error) {
type NoMethod Control
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CustomError: Customize service error responses. For example, list any
// service specific protobuf types that can appear in error detail lists
// of error responses. Example: custom_error: types: -
// google.foo.v1.CustomError - google.foo.v1.AnotherError
type CustomError struct {
// Rules: The list of custom error rules that apply to individual API
// messages. **NOTE:** All service configuration rules follow "last one
// wins" order.
Rules []*CustomErrorRule `json:"rules,omitempty"`
// Types: The list of custom error detail types, e.g.
// 'google.foo.v1.CustomError'.
Types []string `json:"types,omitempty"`
// ForceSendFields is a list of field names (e.g. "Rules") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Rules") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CustomError) MarshalJSON() ([]byte, error) {
type NoMethod CustomError
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CustomErrorRule: A custom error rule.
type CustomErrorRule struct {
// IsErrorType: Marks this message as a possible payload in error
// responses. Otherwise, objects of this type will be filtered out when
// they appear in an error payload.
IsErrorType bool `json:"isErrorType,omitempty"`
// Selector: Selects messages to which this rule applies. Refer to
// selector for syntax details.
Selector string `json:"selector,omitempty"`
// ForceSendFields is a list of field names (e.g. "IsErrorType") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "IsErrorType") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CustomErrorRule) MarshalJSON() ([]byte, error) {
type NoMethod CustomErrorRule
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CustomHttpPattern: A custom pattern used to define a custom HTTP
// verb.
type CustomHttpPattern struct {
// Kind: The name of this custom HTTP verb.
Kind string `json:"kind,omitempty"`
// Path: The path matched by this custom verb.
Path string `json:"path,omitempty"`
// ForceSendFields is a list of field names (e.g. "Kind") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Kind") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CustomHttpPattern) MarshalJSON() ([]byte, error) {
type NoMethod CustomHttpPattern
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// DeleteServiceStrategy: Strategy used to delete a service. This
// strategy is a placeholder only used by the system generated rollout
// to delete a service.
type DeleteServiceStrategy struct {
}
// Diagnostic: Represents a diagnostic message (error or warning)
type Diagnostic struct {
// Kind: The kind of diagnostic information provided.
//
// Possible values:
// "WARNING" - Warnings and errors
// "ERROR" - Only errors
Kind string `json:"kind,omitempty"`
// Location: File name and line number of the error or warning.
Location string `json:"location,omitempty"`
// Message: Message describing the error or warning.
Message string `json:"message,omitempty"`
// ForceSendFields is a list of field names (e.g. "Kind") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Kind") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Diagnostic) MarshalJSON() ([]byte, error) {
type NoMethod Diagnostic
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Documentation: `Documentation` provides the information for
// describing a service. Example: documentation: summary: > The Google
// Calendar API gives access to most calendar features. pages: - name:
// Overview content: (== include google/foo/overview.md ==) - name:
// Tutorial content: (== include google/foo/tutorial.md ==) subpages: -
// name: Java content: (== include google/foo/tutorial_java.md ==)
// rules: - selector: google.calendar.Calendar.Get description: > ... -
// selector: google.calendar.Calendar.Put description: > ...
// Documentation is provided in markdown syntax. In addition to standard
// markdown features, definition lists, tables and fenced code blocks
// are supported. Section headers can be provided and are interpreted
// relative to the section nesting of the context where a documentation
// fragment is embedded. Documentation from the IDL is merged with
// documentation defined via the config at normalization time, where
// documentation provided by config rules overrides the IDL-provided text. A
// number of constructs specific to the API platform are supported in
// documentation text. In order to reference a proto element, the
// following notation can be used: [fully.qualified.proto.name][] To
// override the display text used for the link, this can be used:
// [display text][fully.qualified.proto.name] Text can be excluded from
// doc using the following notation: (-- internal comment --) A few
// directives are available in documentation. Note that directives must
// appear on a single line to be properly identified. The `include`
// directive includes a markdown file from an external source: (==
// include path/to/file ==) The `resource_for` directive marks a message
// to be the resource of a collection in REST view. If it is not
// specified, tools attempt to infer the resource from the operations in
// a collection: (== resource_for v1.shelves.books ==) The directive
// `suppress_warning` does not directly affect documentation and is
// documented together with service config validation.
type Documentation struct {
// DocumentationRootUrl: The URL to the root of documentation.
DocumentationRootUrl string `json:"documentationRootUrl,omitempty"`
// Overview: Declares a single overview page. For example:
// documentation: summary: ... overview: (== include overview.md ==)
// This is a shortcut for the following declaration (using pages style):
// documentation: summary: ... pages: - name: Overview content: (==
// include overview.md ==) Note: you cannot specify both `overview`
// field and `pages` field.
Overview string `json:"overview,omitempty"`
// Pages: The top level pages for the documentation set.
Pages []*Page `json:"pages,omitempty"`
// Rules: A list of documentation rules that apply to individual API
// elements. **NOTE:** All service configuration rules follow "last one
// wins" order.
Rules []*DocumentationRule `json:"rules,omitempty"`
// ServiceRootUrl: Specifies the service root url if the default one
// (the service name from the yaml file) is not suitable. This can be
// seen in any fully specified service urls as well as sections that
// show a base that other urls are relative to.
ServiceRootUrl string `json:"serviceRootUrl,omitempty"`
// Summary: A short description of what the service does. The summary
// must be plain text. It becomes the overview of the service displayed
// in Google Cloud Console. NOTE: This field is equivalent to the
// standard field `description`.
Summary string `json:"summary,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "DocumentationRootUrl") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DocumentationRootUrl") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Documentation) MarshalJSON() ([]byte, error) {
type NoMethod Documentation
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
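// exampleDocumentation is a minimal illustrative sketch (not part of
// the generated API surface): it shows one way a Documentation value
// might be populated, using made-up page and selector names. Only Pages
// is set, since Overview and Pages are mutually exclusive, and
// ForceSendFields demonstrates how an explicitly empty Summary would be
// transmitted in a Patch request.
func exampleDocumentation() *Documentation {
	return &Documentation{
		Pages: []*Page{
			{Name: "Overview", Content: "(== include overview.md ==)"},
		},
		Rules: []*DocumentationRule{
			{Selector: "google.calendar.Calendar.Get", Description: "..."},
		},
		// Send the empty Summary field instead of omitting it.
		ForceSendFields: []string{"Summary"},
	}
}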
// DocumentationRule: A documentation rule provides information about
// individual API elements.
type DocumentationRule struct {
// DeprecationDescription: Deprecation description of the selected
// element(s). It can be provided if an element is marked as
// `deprecated`.
DeprecationDescription string `json:"deprecationDescription,omitempty"`
// Description: Description of the selected proto element (e.g. a
// message, a method, a 'service' definition, or a field). Defaults to
// leading & trailing comments taken from the proto source definition of
// the proto element.
Description string `json:"description,omitempty"`
// Selector: The selector is a comma-separated list of patterns for any
// element such as a method, a field, an enum value. Each pattern is a
// qualified name of the element which may end in "*", indicating a
// wildcard. Wildcards are only allowed at the end and for a whole
// component of the qualified name, i.e. "foo.*" is ok, but not "foo.b*"
// or "foo.*.bar". A wildcard will match one or more components. To
// specify a default for all applicable elements, the whole pattern "*"
// is used.
Selector string `json:"selector,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "DeprecationDescription") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DeprecationDescription")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *DocumentationRule) MarshalJSON() ([]byte, error) {
type NoMethod DocumentationRule
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
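// exampleDocumentationRule sketches the selector wildcard syntax with
// assumed, illustrative names. A trailing ".*" covers whole components
// only; patterns like "google.calendar.C*" would be invalid.
func exampleDocumentationRule() *DocumentationRule {
	return &DocumentationRule{
		Selector:    "google.calendar.*",
		Description: "Applies to every element under google.calendar.",
	}
}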
// EnableServiceResponse: Operation payload for EnableService method.
type EnableServiceResponse struct {
}
// Endpoint: `Endpoint` describes a network address of a service that
// serves a set of APIs. It is commonly known as a service endpoint. A
// service may expose any number of service endpoints, and all service
// endpoints share the same service definition, such as quota limits and
// monitoring metrics. Example:
//   type: google.api.Service
//   name: library-example.googleapis.com
//   endpoints:
//     # Declares network address `https://library-example.googleapis.com`
//     # for service `library-example.googleapis.com`. The `https` scheme
//     # is implicit for all service endpoints. Other schemes may be
//     # supported in the future.
//   - name: library-example.googleapis.com
//     allow_cors: false
//   - name: content-staging-library-example.googleapis.com
//     # Allows HTTP OPTIONS calls to be passed to the API frontend, for it
//     # to decide whether the subsequent cross-origin request is allowed
//     # to proceed.
//     allow_cors: true
type Endpoint struct {
// AllowCors: Allowing CORS
// (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka
// cross-domain traffic, would allow the backends served from this
// endpoint to receive and respond to HTTP OPTIONS requests. The
// response will be used by the browser to determine whether the
// subsequent cross-origin request is allowed to proceed.
AllowCors bool `json:"allowCors,omitempty"`
// Name: The canonical name of this endpoint.
Name string `json:"name,omitempty"`
// Target: The specification of an Internet routable address of API
// frontend that will handle requests to this API Endpoint
// (https://cloud.google.com/apis/design/glossary). It should be either
// a valid IPv4 address or a fully-qualified domain name. For example,
// "8.8.8.8" or "myservice.appspot.com".
Target string `json:"target,omitempty"`
// ForceSendFields is a list of field names (e.g. "AllowCors") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AllowCors") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Endpoint) MarshalJSON() ([]byte, error) {
type NoMethod Endpoint
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
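// exampleEndpoints mirrors the YAML example from the Endpoint doc
// comment as Go values; the endpoint names are illustrative only.
func exampleEndpoints() []*Endpoint {
	return []*Endpoint{
		{Name: "library-example.googleapis.com", AllowCors: false},
		// Lets browsers send preflight OPTIONS requests so the API
		// frontend can decide whether a cross-origin call may proceed.
		{Name: "content-staging-library-example.googleapis.com", AllowCors: true},
	}
}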
// Enum: Enum type definition.
type Enum struct {
// Enumvalue: Enum value definitions.
Enumvalue []*EnumValue `json:"enumvalue,omitempty"`
// Name: Enum type name.
Name string `json:"name,omitempty"`
// Options: Protocol buffer options.
Options []*Option `json:"options,omitempty"`
// SourceContext: The source context.
SourceContext *SourceContext `json:"sourceContext,omitempty"`
// Syntax: The source syntax.
//
// Possible values:
// "SYNTAX_PROTO2" - Syntax `proto2`.
// "SYNTAX_PROTO3" - Syntax `proto3`.
Syntax string `json:"syntax,omitempty"`
// ForceSendFields is a list of field names (e.g. "Enumvalue") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Enumvalue") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Enum) MarshalJSON() ([]byte, error) {
type NoMethod Enum
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// EnumValue: Enum value definition.
type EnumValue struct {
// Name: Enum value name.
Name string `json:"name,omitempty"`
// Number: Enum value number.
Number int64 `json:"number,omitempty"`
// Options: Protocol buffer options.
Options []*Option `json:"options,omitempty"`
// ForceSendFields is a list of field names (e.g. "Name") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Name") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *EnumValue) MarshalJSON() ([]byte, error) {
type NoMethod EnumValue
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
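// exampleEnum is a hypothetical sketch of an Enum with its EnumValue
// definitions; the type and value names are invented for illustration.
func exampleEnum() *Enum {
	return &Enum{
		Name:   "google.example.Color",
		Syntax: "SYNTAX_PROTO3",
		Enumvalue: []*EnumValue{
			{Name: "COLOR_UNSPECIFIED", Number: 0},
			{Name: "RED", Number: 1},
		},
	}
}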
// Expr: Represents a textual expression in the Common Expression
// Language (CEL) syntax. CEL is a C-like expression language. The
// syntax and semantics of CEL are documented at
// https://github.com/google/cel-spec.
// Example (Comparison):
//   title: "Summary size limit"
//   description: "Determines if a summary is less than 100 chars"
//   expression: "document.summary.size() < 100"
// Example (Equality):
//   title: "Requestor is owner"
//   description: "Determines if requestor is the document owner"
//   expression: "document.owner == request.auth.claims.email"
// Example (Logic):
//   title: "Public documents"
//   description: "Determine whether the document should be publicly visible"
//   expression: "document.type != 'private' && document.type != 'internal'"
// Example (Data Manipulation):
//   title: "Notification string"
//   description: "Create a notification string with a timestamp."
//   expression: "'New message received at ' + string(document.create_time)"
// The exact variables and functions that may be referenced within an
// expression are determined by the service that evaluates it. See the
// service documentation for additional information.
type Expr struct {
// Description: Optional. Description of the expression. This is a
// longer text which describes the expression, e.g. when hovered over it
// in a UI.
Description string `json:"description,omitempty"`
// Expression: Textual representation of an expression in Common
// Expression Language syntax.
Expression string `json:"expression,omitempty"`
// Location: Optional. String indicating the location of the expression
// for error reporting, e.g. a file name and a position in the file.
Location string `json:"location,omitempty"`
// Title: Optional. Title for the expression, i.e. a short string
// describing its purpose. This can be used e.g. in UIs which allow to
// enter the expression.
Title string `json:"title,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Expr) MarshalJSON() ([]byte, error) {
type NoMethod Expr
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
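// exampleExpr reproduces the "Summary size limit" comparison example
// from the Expr doc comment as a Go value.
func exampleExpr() *Expr {
	return &Expr{
		Title:       "Summary size limit",
		Description: "Determines if a summary is less than 100 chars",
		Expression:  "document.summary.size() < 100",
	}
}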
// Field: A single field of a message type.
type Field struct {
// Cardinality: The field cardinality.
//
// Possible values:
// "CARDINALITY_UNKNOWN" - For fields with unknown cardinality.
// "CARDINALITY_OPTIONAL" - For optional fields.
// "CARDINALITY_REQUIRED" - For required fields. Proto2 syntax only.
// "CARDINALITY_REPEATED" - For repeated fields.
Cardinality string `json:"cardinality,omitempty"`
// DefaultValue: The string value of the default value of this field.
// Proto2 syntax only.
DefaultValue string `json:"defaultValue,omitempty"`
// JsonName: The field JSON name.
JsonName string `json:"jsonName,omitempty"`
// Kind: The field type.
//
// Possible values:
// "TYPE_UNKNOWN" - Field type unknown.
// "TYPE_DOUBLE" - Field type double.
// "TYPE_FLOAT" - Field type float.
// "TYPE_INT64" - Field type int64.
// "TYPE_UINT64" - Field type uint64.
// "TYPE_INT32" - Field type int32.
// "TYPE_FIXED64" - Field type fixed64.
// "TYPE_FIXED32" - Field type fixed32.
// "TYPE_BOOL" - Field type bool.
// "TYPE_STRING" - Field type string.
// "TYPE_GROUP" - Field type group. Proto2 syntax only, and
// deprecated.
// "TYPE_MESSAGE" - Field type message.
// "TYPE_BYTES" - Field type bytes.
// "TYPE_UINT32" - Field type uint32.
// "TYPE_ENUM" - Field type enum.
// "TYPE_SFIXED32" - Field type sfixed32.
// "TYPE_SFIXED64" - Field type sfixed64.
// "TYPE_SINT32" - Field type sint32.
// "TYPE_SINT64" - Field type sint64.
Kind string `json:"kind,omitempty"`
// Name: The field name.
Name string `json:"name,omitempty"`
// Number: The field number.
Number int64 `json:"number,omitempty"`
// OneofIndex: The index of the field type in `Type.oneofs`, for message
// or enumeration types. The first type has index 1; zero means the type
// is not in the list.
OneofIndex int64 `json:"oneofIndex,omitempty"`
// Options: The protocol buffer options.
Options []*Option `json:"options,omitempty"`
// Packed: Whether to use alternative packed wire representation.
Packed bool `json:"packed,omitempty"`
// TypeUrl: The field type URL, without the scheme, for message or
// enumeration types. Example:
// "type.googleapis.com/google.protobuf.Timestamp".
TypeUrl string `json:"typeUrl,omitempty"`
// ForceSendFields is a list of field names (e.g. "Cardinality") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Cardinality") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Field) MarshalJSON() ([]byte, error) {
type NoMethod Field
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
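// exampleField sketches a single optional int32 field as it might be
// described by the Field message; the field name is illustrative.
func exampleField() *Field {
	return &Field{
		Name:        "page_size",
		JsonName:    "pageSize",
		Number:      1,
		Kind:        "TYPE_INT32",
		Cardinality: "CARDINALITY_OPTIONAL",
	}
}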
// FlowErrorDetails: Encapsulation of flow-specific error details for
// debugging. Used as a details field on an error Status, not intended
// for external use.
type FlowErrorDetails struct {
// ExceptionType: The type of exception (as a class name).
ExceptionType string `json:"exceptionType,omitempty"`
// FlowStepId: The step that failed.
FlowStepId string `json:"flowStepId,omitempty"`
// ForceSendFields is a list of field names (e.g. "ExceptionType") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ExceptionType") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *FlowErrorDetails) MarshalJSON() ([]byte, error) {
type NoMethod FlowErrorDetails
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GenerateConfigReportRequest: Request message for GenerateConfigReport
// method.
type GenerateConfigReportRequest struct {
// NewConfig: Required. Service configuration for which we want to
// generate the report. For this version of the API, the supported types
// are google.api.servicemanagement.v1.ConfigRef,
// google.api.servicemanagement.v1.ConfigSource, and google.api.Service.
NewConfig googleapi.RawMessage `json:"newConfig,omitempty"`
// OldConfig: Optional. Service configuration against which the
// comparison will be done. For this version of the API, the supported
// types are google.api.servicemanagement.v1.ConfigRef,
// google.api.servicemanagement.v1.ConfigSource, and google.api.Service.
OldConfig googleapi.RawMessage `json:"oldConfig,omitempty"`
// ForceSendFields is a list of field names (e.g. "NewConfig") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NewConfig") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GenerateConfigReportRequest) MarshalJSON() ([]byte, error) {
type NoMethod GenerateConfigReportRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
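// exampleConfigReportRequest sketches a GenerateConfigReportRequest
// comparing two config revisions. The raw JSON payloads (including the
// service and config names) are assumptions for illustration; any of
// the supported types listed above could be supplied instead.
func exampleConfigReportRequest() *GenerateConfigReportRequest {
	return &GenerateConfigReportRequest{
		NewConfig: googleapi.RawMessage(`{"@type": "type.googleapis.com/google.api.servicemanagement.v1.ConfigRef", "name": "services/example.googleapis.com/configs/2"}`),
		OldConfig: googleapi.RawMessage(`{"@type": "type.googleapis.com/google.api.servicemanagement.v1.ConfigRef", "name": "services/example.googleapis.com/configs/1"}`),
	}
}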
// GenerateConfigReportResponse: Response message for
// GenerateConfigReport method.
type GenerateConfigReportResponse struct {
// ChangeReports: A list of ChangeReport messages, each corresponding to
// a comparison between two service configurations.
ChangeReports []*ChangeReport `json:"changeReports,omitempty"`
// Diagnostics: Errors / Linter warnings associated with the service
// definition this report belongs to.
Diagnostics []*Diagnostic `json:"diagnostics,omitempty"`
// Id: ID of the service configuration this report belongs to.
Id string `json:"id,omitempty"`
// ServiceName: Name of the service this report belongs to.
ServiceName string `json:"serviceName,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "ChangeReports") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ChangeReports") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GenerateConfigReportResponse) MarshalJSON() ([]byte, error) {
type NoMethod GenerateConfigReportResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GetIamPolicyRequest: Request message for `GetIamPolicy` method.
type GetIamPolicyRequest struct {
// Options: OPTIONAL: A `GetPolicyOptions` object for specifying options
// to `GetIamPolicy`.
Options *GetPolicyOptions `json:"options,omitempty"`
// ForceSendFields is a list of field names (e.g. "Options") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Options") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) {
type NoMethod GetIamPolicyRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GetPolicyOptions: Encapsulates settings provided to GetIamPolicy.
type GetPolicyOptions struct {
// RequestedPolicyVersion: Optional. The policy format version to be
// returned. Valid values are 0, 1, and 3. Requests specifying an
// invalid value will be rejected. Requests for policies with any
// conditional bindings must specify version 3. Policies without any
// conditional bindings may specify any valid value or leave the field
// unset. To learn which resources support conditions in their IAM
// policies, see the IAM documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "RequestedPolicyVersion") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "RequestedPolicyVersion")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) {
type NoMethod GetPolicyOptions
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
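// exampleGetIamPolicyRequest is a minimal sketch of requesting a policy
// in format version 3, which is required whenever the policy may
// contain conditional bindings.
func exampleGetIamPolicyRequest() *GetIamPolicyRequest {
	return &GetIamPolicyRequest{
		Options: &GetPolicyOptions{RequestedPolicyVersion: 3},
	}
}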
// Http: Defines the HTTP configuration for an API service. It contains
// a list of HttpRule, each specifying the mapping of an RPC method to
// one or more HTTP REST API methods.
type Http struct {
// FullyDecodeReservedExpansion: When set to true, URL path parameters
// will be fully URI-decoded except in cases of single segment matches
// in reserved expansion, where "%2F" will be left encoded. The default
// behavior is to not decode RFC 6570 reserved characters in multi
// segment matches.
FullyDecodeReservedExpansion bool `json:"fullyDecodeReservedExpansion,omitempty"`
// Rules: A list of HTTP configuration rules that apply to individual
// API methods. **NOTE:** All service configuration rules follow "last
// one wins" order.
Rules []*HttpRule `json:"rules,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "FullyDecodeReservedExpansion") to unconditionally include in API
// requests. By default, fields with empty or default values are omitted
// from API requests. However, any non-pointer, non-interface field
// appearing in ForceSendFields will be sent to the server regardless of
// whether the field is empty or not. This may be used to include empty
// fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g.
// "FullyDecodeReservedExpansion") to include in API requests with the
// JSON null value. By default, fields with empty values are omitted
// from API requests. However, any field with an empty value appearing
// in NullFields will be sent to the server as null. It is an error if a
// field in this list has a non-empty value. This may be used to include
// null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Http) MarshalJSON() ([]byte, error) {
type NoMethod Http
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// HttpRule: # gRPC Transcoding
// gRPC Transcoding is a feature for mapping between a gRPC method and
// one or more HTTP REST endpoints. It allows developers to build a
// single API service that supports both gRPC APIs and REST APIs. Many
// systems, including Google APIs
// (https://github.com/googleapis/googleapis), Cloud Endpoints
// (https://cloud.google.com/endpoints), gRPC Gateway
// (https://github.com/grpc-ecosystem/grpc-gateway), and Envoy
// (https://github.com/envoyproxy/envoy) proxy support this feature and
// use it for large scale production services. `HttpRule` defines the
// schema of the gRPC/REST mapping. The mapping specifies how different
// portions of the gRPC request message are mapped to the URL path, URL
// query parameters, and HTTP request body. It also controls how the
// gRPC response message is mapped to the HTTP response body. `HttpRule`
// is typically specified as a `google.api.http` annotation on the gRPC
// method. Each mapping specifies a URL path template and an HTTP
// method. The path template may refer to one or more fields in the gRPC
// request message, as long as each field is a non-repeated field with a
// primitive (non-message) type. The path template controls how fields
// of the request message are mapped to the URL path. Example:
//   service Messaging {
//     rpc GetMessage(GetMessageRequest) returns (Message) {
//       option (google.api.http) = {
//         get: "/v1/{name=messages/*}"
//       };
//     }
//   }
//   message GetMessageRequest {
//     string name = 1; // Mapped to URL path.
//   }
//   message Message {
//     string text = 1; // The resource content.
//   }
// This enables an HTTP REST to gRPC mapping as below:
//   HTTP | gRPC
//   -----|-----
//   `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")`
// Any fields in the request message which are not bound by the path
// template automatically become HTTP query parameters if there is no
// HTTP request body. For example:
//   service Messaging {
//     rpc GetMessage(GetMessageRequest) returns (Message) {
//       option (google.api.http) = {
//         get: "/v1/messages/{message_id}"
//       };
//     }
//   }
//   message GetMessageRequest {
//     message SubMessage {
//       string subfield = 1;
//     }
//     string message_id = 1; // Mapped to URL path.
//     int64 revision = 2;    // Mapped to URL query parameter `revision`.
//     SubMessage sub = 3;    // Mapped to URL query parameter `sub.subfield`.
//   }
// This enables an HTTP JSON to RPC mapping as below:
//   HTTP | gRPC
//   -----|-----
//   `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
//   `GetMessage(message_id: "123456" revision: 2 sub:
//   SubMessage(subfield: "foo"))`
// Note that fields which are mapped to URL query parameters must have a
// primitive type or a repeated primitive type or a non-repeated message
// type. In the case of a repeated type, the parameter can be repeated
// in the URL as `...?param=A&param=B`. In the case of a message type,
// each field of the message is mapped to a separate parameter, such as
// `...?foo.a=A&foo.b=B&foo.c=C`. For HTTP methods that allow a request
// body, the `body` field specifies the mapping. Consider a REST update
// method on the message resource collection:
//   service Messaging {
//     rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
//       option (google.api.http) = {
//         patch: "/v1/messages/{message_id}"
//         body: "message"
//       };
//     }
//   }
//   message UpdateMessageRequest {
//     string message_id = 1; // mapped to the URL
//     Message message = 2;   // mapped to the body
//   }
// The following HTTP JSON to RPC mapping is enabled, where the
// representation of the JSON in the request body is determined by
// protos JSON encoding:
//   HTTP | gRPC
//   -----|-----
//   `PATCH /v1/messages/123456 { "text": "Hi!" }` |
//   `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
// The special name `*` can be used in the body mapping to define that
// every field not bound by the path template should be mapped to the
// request body. This enables the following alternative definition of
// the update method:
//   service Messaging {
//     rpc UpdateMessage(Message) returns (Message) {
//       option (google.api.http) = {
//         patch: "/v1/messages/{message_id}"
//         body: "*"
//       };
//     }
//   }
//   message Message {
//     string message_id = 1;
//     string text = 2;
//   }
// The following HTTP JSON to RPC mapping is enabled:
//   HTTP | gRPC
//   -----|-----
//   `PATCH /v1/messages/123456 { "text": "Hi!" }` |
//   `UpdateMessage(message_id: "123456" text: "Hi!")`
// Note that when using `*` in the body mapping, it is not possible to
// have HTTP parameters, as all fields not bound by the path end in the
// body. This makes this option more rarely used in practice when
// defining REST APIs. The common usage of `*` is in custom methods
// which don't use the URL at all for transferring data. It is possible
// to define multiple HTTP methods for one RPC by using the
// `additional_bindings` option. Example:
//   service Messaging {
//     rpc GetMessage(GetMessageRequest) returns (Message) {
//       option (google.api.http) = {
//         get: "/v1/messages/{message_id}"
//         additional_bindings {
//           get: "/v1/users/{user_id}/messages/{message_id}"
//         }
//       };
//     }
//   }
//   message GetMessageRequest {
//     string message_id = 1;
//     string user_id = 2;
//   }
// This enables the following two alternative HTTP JSON to RPC mappings:
//   HTTP | gRPC
//   -----|-----
//   `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
//   `GET /v1/users/me/messages/123456` |
//   `GetMessage(user_id: "me" message_id: "123456")`
// ## Rules for HTTP mapping
// 1. Leaf request fields (recursively expanded nested messages in the
//    request message) are classified into three categories:
//    - Fields referred by the path template. They are passed via the
//      URL path.
//    - Fields referred by the HttpRule.body. They are passed via the
//      HTTP request body.
//    - All other fields are passed via the URL query parameters, and
//      the parameter name is the field path in the request message. A
//      repeated field can be represented as multiple query parameters
//      under the same name.
// 2. If HttpRule.body is "*", there is no URL query parameter, all
//    fields are passed via URL path and HTTP request body.
// 3. If HttpRule.body is omitted, there is no HTTP request body, all
//    fields are passed via URL path and URL query parameters.
// ### Path template syntax
//   Template = "/" Segments [ Verb ] ;
//   Segments = Segment { "/" Segment } ;
//   Segment  = "*" | "**" | LITERAL | Variable ;
//   Variable = "{" FieldPath [ "=" Segments ] "}" ;
//   FieldPath = IDENT { "." IDENT } ;
//   Verb = ":" LITERAL ;
// The syntax `*` matches a single URL path segment. The syntax `**`
// matches zero or more URL path segments, which must be the last part
// of the URL path except the `Verb`. The syntax `Variable` matches part
// of the URL path as specified by its template. A variable template
// must not contain other variables. If a variable matches a single path
// segment, its template may be omitted, e.g. `{var}` is equivalent to
// `{var=*}`. The syntax `LITERAL` matches literal text in the URL path.
// If the `LITERAL` contains any reserved character, such characters
// should be percent-encoded before the matching. If a variable contains
// exactly one path segment, such as "{var}" or "{var=*}", when such a
// variable is expanded into a URL path on the client side, all
// characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The server
// side does the reverse decoding. Such variables show up in the
// Discovery Document
// (https://developers.google.com/discovery/v1/reference/apis) as
// `{var}`. If a variable contains multiple path segments, such as
// "{var=foo/*}" or "{var=**}", when such a variable is expanded into a
// URL path on the client side, all characters except `[-_.~/0-9a-zA-Z]`
// are percent-encoded. The server side does the reverse decoding,
// except "%2F" and "%2f" are left unchanged. Such variables show up in
// the Discovery Document
// (https://developers.google.com/discovery/v1/reference/apis) as
// `{+var}`.
// ## Using gRPC API Service Configuration
// gRPC API Service Configuration (service config) is a configuration
// language for configuring a gRPC service to become a user-facing
// product. The service config is simply the YAML representation of the
// `google.api.Service` proto message. As an alternative to annotating
// your proto file, you can configure gRPC transcoding in your service
// config YAML files. You do this by specifying a `HttpRule` that maps
// the gRPC method to a REST endpoint, achieving the same effect as the
// proto annotation. This can be particularly useful if you have a proto
// that is reused in multiple services. Note that any transcoding
// specified in the service config will override any matching
// transcoding configuration in the proto. Example:
//   http:
//     rules:
//       # Selects a gRPC method and applies HttpRule to it.
//     - selector: example.v1.Messaging.GetMessage
//       get: /v1/messages/{message_id}/{sub.subfield}
// ## Special notes
// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints,
// the proto to JSON conversion must follow the proto3 specification
// (https://developers.google.com/protocol-buffers/docs/proto3#json).
// While the single segment variable follows the semantics of RFC 6570
// (https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
// Expansion, the multi segment variable **does not** follow RFC 6570
// Section 3.2.3 Reserved Expansion. The reason is that the Reserved
// Expansion does not expand special characters like `?` and `#`, which
// would lead to invalid URLs. As a result, gRPC Transcoding uses a
// custom encoding for multi segment variables. The path variables
// **must not** refer to any repeated or mapped field, because client
// libraries are not capable of handling such variable expansion. The
// path variables **must not** capture the leading "/" character. The
// reason is that the most common use case "{var}" does not capture the
// leading "/" character. For consistency, all path variables must share
// the same behavior. Repeated message fields must not be mapped to URL
// query parameters, because no client library can support such
// complicated mapping. If an API needs to use a JSON array for request
// or response body, it can map the request or response body to a
// repeated field. However, some gRPC Transcoding implementations may
// not support this feature.
type HttpRule struct {
// AdditionalBindings: Additional HTTP bindings for the selector. Nested
// bindings must not contain an `additional_bindings` field themselves
// (that is, the nesting may only be one level deep).
AdditionalBindings []*HttpRule `json:"additionalBindings,omitempty"`
// Body: The name of the request field whose value is mapped to the HTTP
// request body, or `*` for mapping all request fields not captured by
// the path pattern to the HTTP body, or omitted for not having any HTTP
// request body. NOTE: the referred field must be present at the
// top-level of the request message type.
Body string `json:"body,omitempty"`
// Custom: The custom pattern is used for specifying an HTTP method that
// is not included in the `pattern` field, such as HEAD, or "*" to leave
// the HTTP method unspecified for this rule. The wild-card rule is
// useful for services that provide content to Web (HTML) clients.
Custom *CustomHttpPattern `json:"custom,omitempty"`
// Delete: Maps to HTTP DELETE. Used for deleting a resource.
Delete string `json:"delete,omitempty"`
// Get: Maps to HTTP GET. Used for listing and getting information about
// resources.
Get string `json:"get,omitempty"`
// Patch: Maps to HTTP PATCH. Used for updating a resource.
Patch string `json:"patch,omitempty"`
// Post: Maps to HTTP POST. Used for creating a resource or performing
// an action.
Post string `json:"post,omitempty"`
// Put: Maps to HTTP PUT. Used for replacing a resource.
Put string `json:"put,omitempty"`
// ResponseBody: Optional. The name of the response field whose value is
// mapped to the HTTP response body. When omitted, the entire response
// message will be used as the HTTP response body. NOTE: The referred
// field must be present at the top-level of the response message type.
ResponseBody string `json:"responseBody,omitempty"`
// Selector: Selects a method to which this rule applies. Refer to
// selector for syntax details.
Selector string `json:"selector,omitempty"`
// ForceSendFields is a list of field names (e.g. "AdditionalBindings")
// to unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AdditionalBindings") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *HttpRule) MarshalJSON() ([]byte, error) {
type NoMethod HttpRule
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
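// exampleHttpRule mirrors the additional_bindings example from the
// HttpRule doc comment; the selector and path templates are
// illustrative values.
func exampleHttpRule() *HttpRule {
	return &HttpRule{
		Selector: "example.v1.Messaging.GetMessage",
		Get:      "/v1/messages/{message_id}",
		AdditionalBindings: []*HttpRule{
			// Nested bindings must not themselves contain
			// additional_bindings (one level of nesting only).
			{Get: "/v1/users/{user_id}/messages/{message_id}"},
		},
	}
}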
// JwtLocation: Specifies a location to extract JWT from an API request.
type JwtLocation struct {
// Header: Specifies the HTTP header name from which to extract the JWT.
Header string `json:"header,omitempty"`
// Query: Specifies the URL query parameter name from which to extract
// the JWT.
Query string `json:"query,omitempty"`
// ValuePrefix: The value prefix. The value format is
// "value_prefix{token}". Only applies to the "in" header type, and must
// be empty for the "in" query type. If not empty, the header value has
// to match (case sensitive) this prefix. If not matched, the JWT will
// not be extracted. If matched, the JWT will be extracted after the
// prefix is removed. For example, for "Authorization: Bearer {JWT}",
// value_prefix="Bearer " with a space at the end.
ValuePrefix string `json:"valuePrefix,omitempty"`
// ForceSendFields is a list of field names (e.g. "Header") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Header") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *JwtLocation) MarshalJSON() ([]byte, error) {
type NoMethod JwtLocation
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
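// exampleJwtLocations sketches the two extraction styles described
// above: a header with a value prefix and a bare query parameter. The
// names are conventional but illustrative.
func exampleJwtLocations() []*JwtLocation {
	return []*JwtLocation{
		// Extracts the JWT from "Authorization: Bearer {JWT}"; note
		// the required trailing space in the prefix.
		{Header: "Authorization", ValuePrefix: "Bearer "},
		// Extracts the JWT from "?access_token={JWT}"; ValuePrefix
		// must stay empty for query locations.
		{Query: "access_token"},
	}
}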
// LabelDescriptor: A description of a label.
type LabelDescriptor struct {
// Description: A human-readable description for the label.
Description string `json:"description,omitempty"`
// Key: The label key.
Key string `json:"key,omitempty"`
// ValueType: The type of data that can be assigned to the label.
//
// Possible values:
// "STRING" - A variable-length string. This is the default.
// "BOOL" - Boolean; true or false.
// "INT64" - A 64-bit signed integer.
ValueType string `json:"valueType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LabelDescriptor) MarshalJSON() ([]byte, error) {
type NoMethod LabelDescriptor
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListOperationsResponse: The response message for
// Operations.ListOperations.
type ListOperationsResponse struct {
// NextPageToken: The standard List next-page token.
NextPageToken string `json:"nextPageToken,omitempty"`
// Operations: A list of operations that matches the specified filter in
// the request.
Operations []*Operation `json:"operations,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListOperationsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListServiceConfigsResponse: Response message for ListServiceConfigs
// method.
type ListServiceConfigsResponse struct {
// NextPageToken: The token of the next page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServiceConfigs: The list of service configuration resources.
ServiceConfigs []*Service `json:"serviceConfigs,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListServiceConfigsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListServiceConfigsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListServiceRolloutsResponse: Response message for ListServiceRollouts
// method.
type ListServiceRolloutsResponse struct {
// NextPageToken: The token of the next page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// Rollouts: The list of rollout resources.
Rollouts []*Rollout `json:"rollouts,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListServiceRolloutsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListServiceRolloutsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListServicesResponse: Response message for `ListServices` method.
type ListServicesResponse struct {
// NextPageToken: Token that can be passed to `ListServices` to resume a
// paginated query.
NextPageToken string `json:"nextPageToken,omitempty"`
// Services: The returned services will only have the name field set.
Services []*ManagedService `json:"services,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListServicesResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListServicesResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
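// listAllServices is a sketch of draining a paginated ListServices
// result by following NextPageToken. The listPage parameter is a
// hypothetical stand-in for whatever issues a single ListServices call
// with the given page token; it is not part of the generated API
// surface shown here.
func listAllServices(listPage func(pageToken string) (*ListServicesResponse, error)) ([]*ManagedService, error) {
	var all []*ManagedService
	token := ""
	for {
		resp, err := listPage(token)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Services...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		token = resp.NextPageToken
	}
}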
// LogDescriptor: A description of a log type. Example in YAML format:
//   - name: library.googleapis.com/activity_history
//     description: The history of borrowing and returning library items.
//     display_name: Activity
//     labels:
//     - key: /customer_id
//       description: Identifier of a library customer
type LogDescriptor struct {
// Description: A human-readable description of this log. This
// information appears in the documentation and can contain details.
Description string `json:"description,omitempty"`
// DisplayName: The human-readable name for this log. This information
// appears on the user interface and should be concise.
DisplayName string `json:"displayName,omitempty"`
// Labels: The set of labels that are available to describe a specific
// log entry. Runtime requests that contain labels not specified here
// are considered invalid.
Labels []*LabelDescriptor `json:"labels,omitempty"`
// Name: The name of the log. It must be less than 512 characters long
// and can include the following characters: upper- and lower-case
// alphanumeric characters [A-Za-z0-9], and punctuation characters
// including slash, underscore, hyphen, period [/_-.].
Name string `json:"name,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LogDescriptor) MarshalJSON() ([]byte, error) {
type NoMethod LogDescriptor
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Logging: Logging configuration of the service. The following example
// shows how to configure logs to be sent to the producer and consumer
// projects. In the example, the `activity_history` log is sent to both
// the producer and consumer projects, whereas the `purchase_history`
// log is only sent to the producer project.
//   monitored_resources:
//   - type: library.googleapis.com/branch
//     labels:
//     - key: /city
//       description: The city where the library branch is located.
//     - key: /name
//       description: The name of the branch.
//   logs:
//   - name: activity_history
//     labels:
//     - key: /customer_id
//   - name: purchase_history
//   logging:
//     producer_destinations:
//     - monitored_resource: library.googleapis.com/branch
//       logs:
//       - activity_history
//       - purchase_history
//     consumer_destinations:
//     - monitored_resource: library.googleapis.com/branch
//       logs:
//       - activity_history
type Logging struct {
// ConsumerDestinations: Logging configurations for sending logs to the
// consumer project. There can be multiple consumer destinations, each
// one must have a different monitored resource type. A log can be used
// in at most one consumer destination.
ConsumerDestinations []*LoggingDestination `json:"consumerDestinations,omitempty"`
// ProducerDestinations: Logging configurations for sending logs to the
// producer project. There can be multiple producer destinations, each
// one must have a different monitored resource type. A log can be used
// in at most one producer destination.
ProducerDestinations []*LoggingDestination `json:"producerDestinations,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "ConsumerDestinations") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ConsumerDestinations") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Logging) MarshalJSON() ([]byte, error) {
type NoMethod Logging
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// LoggingDestination: Configuration of a specific logging destination
// (the producer project or the consumer project).
type LoggingDestination struct {
// Logs: Names of the logs to be sent to this destination. Each name
// must be defined in the Service.logs section. If the log name is not a
// domain scoped name, it will be automatically prefixed with the
// service name followed by "/".
Logs []string `json:"logs,omitempty"`
// MonitoredResource: The monitored resource type. The type must be
// defined in the Service.monitored_resources section.
MonitoredResource string `json:"monitoredResource,omitempty"`
// ForceSendFields is a list of field names (e.g. "Logs") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Logs") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LoggingDestination) MarshalJSON() ([]byte, error) {
type NoMethod LoggingDestination
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
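// exampleLogging mirrors the YAML example from the Logging doc comment
// as Go values: activity_history goes to both projects while
// purchase_history is producer-only. The resource and log names are
// illustrative.
func exampleLogging() *Logging {
	return &Logging{
		ProducerDestinations: []*LoggingDestination{{
			MonitoredResource: "library.googleapis.com/branch",
			Logs:              []string{"activity_history", "purchase_history"},
		}},
		ConsumerDestinations: []*LoggingDestination{{
			MonitoredResource: "library.googleapis.com/branch",
			Logs:              []string{"activity_history"},
		}},
	}
}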
// ManagedService: The full representation of a Service that is managed
// by Google Service Management.
type ManagedService struct {
// ProducerProjectId: ID of the project that produces and owns this
// service.
ProducerProjectId string `json:"producerProjectId,omitempty"`
// ServiceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements.
ServiceName string `json:"serviceName,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "ProducerProjectId")
// to unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ProducerProjectId") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *ManagedService) MarshalJSON() ([]byte, error) {
type NoMethod ManagedService
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Method: Method represents a method of an API interface.
type Method struct {
// Name: The simple name of this method.
Name string `json:"name,omitempty"`
// Options: Any metadata attached to the method.
Options []*Option `json:"options,omitempty"`
// RequestStreaming: If true, the request is streamed.
RequestStreaming bool `json:"requestStreaming,omitempty"`
// RequestTypeUrl: A URL of the input message type.
RequestTypeUrl string `json:"requestTypeUrl,omitempty"`
// ResponseStreaming: If true, the response is streamed.
ResponseStreaming bool `json:"responseStreaming,omitempty"`
// ResponseTypeUrl: The URL of the output message type.
ResponseTypeUrl string `json:"responseTypeUrl,omitempty"`
// Syntax: The source syntax of this method.
//
// Possible values:
// "SYNTAX_PROTO2" - Syntax `proto2`.
// "SYNTAX_PROTO3" - Syntax `proto3`.
Syntax string `json:"syntax,omitempty"`
// ForceSendFields is a list of field names (e.g. "Name") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Name") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Method) MarshalJSON() ([]byte, error) {
type NoMethod Method
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
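// Illustrative sketch, not generated code: a Method describing a unary
// RPC. The type URLs follow the usual "type.googleapis.com/..."
// convention; the message names here are hypothetical.
func exampleMethod() *Method {
	return &Method{
		Name:            "GetBook",
		RequestTypeUrl:  "type.googleapis.com/google.example.library.v1.GetBookRequest",
		ResponseTypeUrl: "type.googleapis.com/google.example.library.v1.Book",
		// Neither side streams for a simple unary method.
		RequestStreaming:  false,
		ResponseStreaming: false,
		Syntax:            "SYNTAX_PROTO3",
	}
}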
// MetricDescriptor: Defines a metric type and its schema. Once a metric
// descriptor is created, deleting or altering it stops data collection
// and makes the metric type's existing data unusable.
type MetricDescriptor struct {
// Description: A detailed description of the metric, which can be used
// in documentation.
Description string `json:"description,omitempty"`
// DisplayName: A concise name for the metric, which can be displayed in
// user interfaces. Use sentence case without an ending period, for
// example "Request count". This field is optional but it is recommended
// to be set for any metrics associated with user-visible concepts, such
// as Quota.
DisplayName string `json:"displayName,omitempty"`
// Labels: The set of labels that can be used to describe a specific
// instance of this metric type. For example, the
// `appengine.googleapis.com/http/server/response_latencies` metric type
// has a label for the HTTP response code, `response_code`, so you can
// look at latencies for successful responses or just for responses that
// failed.
Labels []*LabelDescriptor `json:"labels,omitempty"`
// LaunchStage: Optional. The launch stage of the metric definition.
//
// Possible values:
// "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value.
// "UNIMPLEMENTED" - The feature is not yet implemented. Users can not
// use it.
// "PRELAUNCH" - Prelaunch features are hidden from users and are only
// visible internally.
// "EARLY_ACCESS" - Early Access features are limited to a closed
// group of testers. To use these features, you must sign up in advance
// and sign a Trusted Tester agreement (which includes confidentiality
// provisions). These features may be unstable, changed in
// backward-incompatible ways, and are not guaranteed to be released.
// "ALPHA" - Alpha is a limited availability test for releases before
// they are cleared for widespread use. By Alpha, all significant design
// issues are resolved and we are in the process of verifying
// functionality. Alpha customers need to apply for access, agree to
// applicable terms, and have their projects allowlisted. Alpha releases
// don’t have to be feature complete, no SLAs are provided, and there
// are no technical support obligations, but they will be far enough
// along that customers can actually use them in test environments or
// for limited-use tests -- just like they would in normal production
// cases.
// "BETA" - Beta is the point at which we are ready to open a release
// for any customer to use. There are no SLA or technical support
// obligations in a Beta release. Products will be complete from a
// feature perspective, but may have some open outstanding issues. Beta
// releases are suitable for limited production use cases.
// "GA" - GA features are open to all developers and are considered
// stable and fully qualified for production use.
// "DEPRECATED" - Deprecated features are scheduled to be shut down
// and removed. For more information, see the “Deprecation Policy”
// section of our [Terms of Service](https://cloud.google.com/terms/)
// and the [Google Cloud Platform Subject to the Deprecation
// Policy](https://cloud.google.com/terms/deprecation) documentation.
LaunchStage string `json:"launchStage,omitempty"`
// Metadata: Optional. Metadata which can be used to guide usage of the
// metric.
Metadata *MetricDescriptorMetadata `json:"metadata,omitempty"`
// MetricKind: Whether the metric records instantaneous values, changes
// to a value, etc. Some combinations of `metric_kind` and `value_type`
// might not be supported.
//
// Possible values:
// "METRIC_KIND_UNSPECIFIED" - Do not use this default value.
// "GAUGE" - An instantaneous measurement of a value.
// "DELTA" - The change in a value during a time interval.
// "CUMULATIVE" - A value accumulated over a time interval. Cumulative
// measurements in a time series should have the same start time and
// increasing end times, until an event resets the cumulative value to
// zero and sets a new start time for the following points.
MetricKind string `json:"metricKind,omitempty"`
// MonitoredResourceTypes: Read-only. If present, then a time series,
// which is identified partially by a metric type and a
// MonitoredResourceDescriptor, that is associated with this metric type
// can only be associated with one of the monitored resource types
// listed here.
MonitoredResourceTypes []string `json:"monitoredResourceTypes,omitempty"`
// Name: The resource name of the metric descriptor.
Name string `json:"name,omitempty"`
// Type: The metric type, including its DNS name prefix. The type is not
// URL-encoded. All user-defined metric types have the DNS name
// `custom.googleapis.com` or `external.googleapis.com`. Metric types
// should use a natural hierarchical grouping. For example:
// "custom.googleapis.com/invoice/paid/amount"
// "external.googleapis.com/prometheus/up"
// "appengine.googleapis.com/http/server/response_latencies"
Type string `json:"type,omitempty"`
// Unit: The units in which the metric value is reported. It is only
// applicable if the `value_type` is `INT64`, `DOUBLE`, or
// `DISTRIBUTION`. The `unit` defines the representation of the stored
// metric values. Different systems might scale the values to be more
// easily displayed (so a value of `0.02kBy` _might_ be displayed as
// `20By`, and a value of `3523kBy` _might_ be displayed as `3.5MBy`).
// However, if the `unit` is `kBy`, then the value of the metric is
// always in thousands of bytes, no matter how it might be displayed. If
// you want a custom metric to record the exact number of CPU-seconds
// used by a job, you can create an `INT64 CUMULATIVE` metric whose
// `unit` is `s{CPU}` (or equivalently `1s{CPU}` or just `s`). If the
// job uses 12,005 CPU-seconds, then the value is written as `12005`.
// Alternatively, if you want a custom metric to record data in a more
// granular way, you can create a `DOUBLE CUMULATIVE` metric whose
// `unit` is `ks{CPU}`, and then write the value `12.005` (which is
// `12005/1000`), or use `Kis{CPU}` and write `11.723` (which is
// `12005/1024`). The supported units are a subset of The Unified Code
// for Units of Measure (https://unitsofmeasure.org/ucum.html) standard:
// **Basic units (UNIT)** * `bit` bit * `By` byte * `s` second * `min`
// minute * `h` hour * `d` day * `1` dimensionless **Prefixes (PREFIX)**
// * `k` kilo (10^3) * `M` mega (10^6) * `G` giga (10^9) * `T` tera
// (10^12) * `P` peta (10^15) * `E` exa (10^18) * `Z` zetta (10^21) *
// `Y` yotta (10^24) * `m` milli (10^-3) * `u` micro (10^-6) * `n` nano
// (10^-9) * `p` pico (10^-12) * `f` femto (10^-15) * `a` atto (10^-18)
// * `z` zepto (10^-21) * `y` yocto (10^-24) * `Ki` kibi (2^10) * `Mi`
// mebi (2^20) * `Gi` gibi (2^30) * `Ti` tebi (2^40) * `Pi` pebi (2^50)
// **Grammar** The grammar also includes these connectors: * `/`
// division or ratio (as an infix operator). For examples, `kBy/{email}`
// or `MiBy/10ms` (although you should almost never have `/s` in a
// metric `unit`; rates should always be computed at query time from the
// underlying cumulative or delta value). * `.` multiplication or
// composition (as an infix operator). For examples, `GBy.d` or
// `k{watt}.h`. The grammar for a unit is as follows: Expression =
// Component { "." Component } { "/" Component } ; Component = ( [
// PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation
// = "{" NAME "}" ; Notes: * `Annotation` is just a comment if it
// follows a `UNIT`. If the annotation is used alone, then the unit is
// equivalent to `1`. For examples, `{request}/s == 1/s`,
// `By{transmitted}/s == By/s`. * `NAME` is a sequence of non-blank
// printable ASCII characters not containing `{` or `}`. * `1`
// represents a unitary dimensionless unit
// (https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as
// in `1/s`. It is typically used when none of the basic units are
// appropriate. For example, "new users per day" can be represented as
// `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new
// users"). Alternatively, "thousands of page views per day" would be
// represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric
// value of `5.3` would mean "5300 page views per day"). * `%`
// represents dimensionless value of 1/100, and annotates values giving
// a percentage (so the metric values are typically in the range of
// 0..100, and a metric value `3` means "3 percent"). * `10^2.%`
// indicates a metric contains a ratio, typically in the range 0..1,
// that will be multiplied by 100 and displayed as a percentage (so a
// metric value `0.03` means "3 percent").
Unit string `json:"unit,omitempty"`
// ValueType: Whether the measurement is an integer, a floating-point
// number, etc. Some combinations of `metric_kind` and `value_type`
// might not be supported.
//
// Possible values:
// "VALUE_TYPE_UNSPECIFIED" - Do not use this default value.
// "BOOL" - The value is a boolean. This value type can be used only
// if the metric kind is `GAUGE`.
// "INT64" - The value is a signed 64-bit integer.
// "DOUBLE" - The value is a double precision floating point number.
// "STRING" - The value is a text string. This value type can be used
// only if the metric kind is `GAUGE`.
// "DISTRIBUTION" - The value is a `Distribution`.
// "MONEY" - The value is money.
ValueType string `json:"valueType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MetricDescriptor) MarshalJSON() ([]byte, error) {
type NoMethod MetricDescriptor
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
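// Illustrative sketch, not generated code: a DELTA/INT64 counter that
// follows the unit grammar described above, with `1` marking a
// dimensionless count. The metric type and label are hypothetical.
func exampleMetricDescriptor() *MetricDescriptor {
	return &MetricDescriptor{
		Type:        "library.googleapis.com/book/returned_count",
		DisplayName: "Books returned",
		Description: "The count of books that have been returned.",
		MetricKind:  "DELTA",
		ValueType:   "INT64",
		Unit:        "1",
		Labels: []*LabelDescriptor{
			{Key: "customer_id", Description: "The id of the customer."},
		},
	}
}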
// MetricDescriptorMetadata: Additional annotations that can be used to
// guide the usage of a metric.
type MetricDescriptorMetadata struct {
// IngestDelay: The delay of data points caused by ingestion. Data
// points older than this age are guaranteed to be ingested and
// available to be read, excluding data loss due to errors.
IngestDelay string `json:"ingestDelay,omitempty"`
// LaunchStage: Deprecated. Must use the MetricDescriptor.launch_stage
// instead.
//
// Possible values:
// "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value.
// "UNIMPLEMENTED" - The feature is not yet implemented. Users can not
// use it.
// "PRELAUNCH" - Prelaunch features are hidden from users and are only
// visible internally.
// "EARLY_ACCESS" - Early Access features are limited to a closed
// group of testers. To use these features, you must sign up in advance
// and sign a Trusted Tester agreement (which includes confidentiality
// provisions). These features may be unstable, changed in
// backward-incompatible ways, and are not guaranteed to be released.
// "ALPHA" - Alpha is a limited availability test for releases before
// they are cleared for widespread use. By Alpha, all significant design
// issues are resolved and we are in the process of verifying
// functionality. Alpha customers need to apply for access, agree to
// applicable terms, and have their projects allowlisted. Alpha releases
// don’t have to be feature complete, no SLAs are provided, and there
// are no technical support obligations, but they will be far enough
// along that customers can actually use them in test environments or
// for limited-use tests -- just like they would in normal production
// cases.
// "BETA" - Beta is the point at which we are ready to open a release
// for any customer to use. There are no SLA or technical support
// obligations in a Beta release. Products will be complete from a
// feature perspective, but may have some open outstanding issues. Beta
// releases are suitable for limited production use cases.
// "GA" - GA features are open to all developers and are considered
// stable and fully qualified for production use.
// "DEPRECATED" - Deprecated features are scheduled to be shut down
// and removed. For more information, see the “Deprecation Policy”
// section of our [Terms of Service](https://cloud.google.com/terms/)
// and the [Google Cloud Platform Subject to the Deprecation
// Policy](https://cloud.google.com/terms/deprecation) documentation.
LaunchStage string `json:"launchStage,omitempty"`
// SamplePeriod: The sampling period of metric data points. For metrics
// which are written periodically, consecutive data points are stored at
// this time interval, excluding data loss due to errors. Metrics with a
// higher granularity have a smaller sampling period.
SamplePeriod string `json:"samplePeriod,omitempty"`
// ForceSendFields is a list of field names (e.g. "IngestDelay") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "IngestDelay") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) {
type NoMethod MetricDescriptorMetadata
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// MetricRule: Bind API methods to metrics. Binding a method to a metric
// causes that metric's configured quota behaviors to apply to the
// method call.
type MetricRule struct {
// MetricCosts: Metrics to update when the selected methods are called,
// and the associated cost applied to each metric. The key of the map is
// the metric name, and the values are the amount increased for the
// metric against which the quota limits are defined. The value must not
// be negative.
MetricCosts map[string]string `json:"metricCosts,omitempty"`
// Selector: Selects the methods to which this rule applies. Refer to
// selector for syntax details.
Selector string `json:"selector,omitempty"`
// ForceSendFields is a list of field names (e.g. "MetricCosts") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MetricCosts") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MetricRule) MarshalJSON() ([]byte, error) {
type NoMethod MetricRule
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
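// Illustrative sketch, not generated code, mirroring the quota example
// in the Quota documentation below: every method costs one read_call,
// while UpdateBook is mapped to write_calls at double cost.
func exampleMetricRules() []*MetricRule {
	return []*MetricRule{
		{
			Selector:    "*",
			MetricCosts: map[string]string{"library.googleapis.com/read_calls": "1"},
		},
		{
			Selector:    "google.example.library.v1.LibraryService.UpdateBook",
			MetricCosts: map[string]string{"library.googleapis.com/write_calls": "2"},
		},
	}
}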
// Mixin: Declares an API Interface to be included in this interface.
// The including interface must redeclare all the methods from the
// included interface, but documentation and options are inherited as
// follows: - If after comment and whitespace stripping, the
// documentation string of the redeclared method is empty, it will be
// inherited from the original method. - Each annotation belonging to
// the service config (http, visibility) which is not set in the
// redeclared method will be inherited. - If an http annotation is
// inherited, the path pattern will be modified as follows. Any version
// prefix will be replaced by the version of the including interface
// plus the root path if specified. Example of a simple mixin: package
// google.acl.v1; service AccessControl { // Get the underlying ACL
// object. rpc GetAcl(GetAclRequest) returns (Acl) { option
// (google.api.http).get = "/v1/{resource=**}:getAcl"; } } package
// google.storage.v2; service Storage { // rpc GetAcl(GetAclRequest)
// returns (Acl); // Get a data record. rpc GetData(GetDataRequest)
// returns (Data) { option (google.api.http).get = "/v2/{resource=**}";
// } } Example of a mixin configuration: apis: - name:
// google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl
// The mixin construct implies that all methods in `AccessControl` are
// also declared with same name and request/response types in `Storage`.
// A documentation generator or annotation processor will see the
// effective `Storage.GetAcl` method after inheriting documentation and
// annotations as follows: service Storage { // Get the underlying ACL
// object. rpc GetAcl(GetAclRequest) returns (Acl) { option
// (google.api.http).get = "/v2/{resource=**}:getAcl"; } ... } Note how
// the version in the path pattern changed from `v1` to `v2`. If the
// `root` field in the mixin is specified, it should be a relative path
// under which inherited HTTP paths are placed. Example: apis: - name:
// google.storage.v2.Storage mixins: - name: google.acl.v1.AccessControl
// root: acls This implies the following inherited HTTP annotation:
// service Storage { // Get the underlying ACL object. rpc
// GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get =
// "/v2/acls/{resource=**}:getAcl"; } ... }
type Mixin struct {
// Name: The fully qualified name of the interface which is included.
Name string `json:"name,omitempty"`
// Root: If non-empty, specifies a path under which inherited HTTP paths
// are rooted.
Root string `json:"root,omitempty"`
// ForceSendFields is a list of field names (e.g. "Name") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Name") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Mixin) MarshalJSON() ([]byte, error) {
type NoMethod Mixin
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
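// Illustrative sketch, not generated code: the mixin configuration
// from the comment above expressed as a struct. With Root set, the
// inherited HTTP paths are placed under "/v2/acls/...".
func exampleMixin() *Mixin {
	return &Mixin{
		Name: "google.acl.v1.AccessControl",
		Root: "acls",
	}
}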
// MonitoredResourceDescriptor: An object that describes the schema of a
// MonitoredResource object using a type name and a set of labels. For
// example, the monitored resource descriptor for Google Compute Engine
// VM instances has a type of "gce_instance" and specifies the use of
// the labels "instance_id" and "zone" to identify particular VM
// instances. Different APIs can support different monitored resource
// types. APIs generally provide a `list` method that returns the
// monitored resource descriptors used by the API.
type MonitoredResourceDescriptor struct {
// Description: Optional. A detailed description of the monitored
// resource type that might be used in documentation.
Description string `json:"description,omitempty"`
// DisplayName: Optional. A concise name for the monitored resource type
// that might be displayed in user interfaces. It should be a Title
// Cased Noun Phrase, without any article or other determiners. For
// example, "Google Cloud SQL Database".
DisplayName string `json:"displayName,omitempty"`
// Labels: Required. A set of labels used to describe instances of this
// monitored resource type. For example, an individual Google Cloud SQL
// database is identified by values for the labels "database_id" and
// "zone".
Labels []*LabelDescriptor `json:"labels,omitempty"`
// LaunchStage: Optional. The launch stage of the monitored resource
// definition.
//
// Possible values:
// "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value.
// "UNIMPLEMENTED" - The feature is not yet implemented. Users can not
// use it.
// "PRELAUNCH" - Prelaunch features are hidden from users and are only
// visible internally.
// "EARLY_ACCESS" - Early Access features are limited to a closed
// group of testers. To use these features, you must sign up in advance
// and sign a Trusted Tester agreement (which includes confidentiality
// provisions). These features may be unstable, changed in
// backward-incompatible ways, and are not guaranteed to be released.
// "ALPHA" - Alpha is a limited availability test for releases before
// they are cleared for widespread use. By Alpha, all significant design
// issues are resolved and we are in the process of verifying
// functionality. Alpha customers need to apply for access, agree to
// applicable terms, and have their projects allowlisted. Alpha releases
// don’t have to be feature complete, no SLAs are provided, and there
// are no technical support obligations, but they will be far enough
// along that customers can actually use them in test environments or
// for limited-use tests -- just like they would in normal production
// cases.
// "BETA" - Beta is the point at which we are ready to open a release
// for any customer to use. There are no SLA or technical support
// obligations in a Beta release. Products will be complete from a
// feature perspective, but may have some open outstanding issues. Beta
// releases are suitable for limited production use cases.
// "GA" - GA features are open to all developers and are considered
// stable and fully qualified for production use.
// "DEPRECATED" - Deprecated features are scheduled to be shut down
// and removed. For more information, see the “Deprecation Policy”
// section of our [Terms of Service](https://cloud.google.com/terms/)
// and the [Google Cloud Platform Subject to the Deprecation
// Policy](https://cloud.google.com/terms/deprecation) documentation.
LaunchStage string `json:"launchStage,omitempty"`
// Name: Optional. The resource name of the monitored resource
// descriptor:
// "projects/{project_id}/monitoredResourceDescriptors/{type}" where
// {type} is the value of the `type` field in this object and
// {project_id} is a project ID that provides API-specific context for
// accessing the type. APIs that do not use project information can use
// the resource name format "monitoredResourceDescriptors/{type}".
Name string `json:"name,omitempty"`
// Type: Required. The monitored resource type. For example, the type
// "cloudsql_database" represents databases in Google Cloud SQL. For a
// list of types, see Monitoring resource types
// (https://cloud.google.com/monitoring/api/resources) and Logging
// resource types
// (https://cloud.google.com/logging/docs/api/v2/resource-list).
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) {
type NoMethod MonitoredResourceDescriptor
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
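// Illustrative sketch, not generated code: a monitored resource type
// identified by two labels, loosely following the Cloud SQL example
// above. The label keys are hypothetical.
func exampleMonitoredResourceDescriptor() *MonitoredResourceDescriptor {
	return &MonitoredResourceDescriptor{
		Type:        "cloudsql_database",
		DisplayName: "Google Cloud SQL Database",
		Description: "An individual Google Cloud SQL database.",
		Labels: []*LabelDescriptor{
			{Key: "database_id", Description: "The id of the database."},
			{Key: "zone", Description: "The zone the database runs in."},
		},
	}
}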
// Monitoring: Monitoring configuration of the service. The example
// below shows how to configure monitored resources and metrics for
// monitoring. In the example, a monitored resource and two metrics are
// defined. The `library.googleapis.com/book/returned_count` metric is
// sent to both producer and consumer projects, whereas the
// `library.googleapis.com/book/num_overdue` metric is only sent to the
// consumer project. monitored_resources: - type:
// library.googleapis.com/Branch display_name: "Library Branch"
// description: "A branch of a library." launch_stage: GA labels: - key:
// resource_container description: "The Cloud container (i.e. project id)
// for the Branch." - key: location description: "The location of the
// library branch." - key: branch_id description: "The id of the
// branch." metrics: - name: library.googleapis.com/book/returned_count
// display_name: "Books Returned" description: "The count of books that
// have been returned." launch_stage: GA metric_kind: DELTA value_type:
// INT64 unit: "1" labels: - key: customer_id description: "The id of
// the customer." - name: library.googleapis.com/book/num_overdue
// display_name: "Books Overdue" description: "The current number of
// overdue books." launch_stage: GA metric_kind: GAUGE value_type: INT64
// unit: "1" labels: - key: customer_id description: "The id of the
// customer." monitoring: producer_destinations: - monitored_resource:
// library.googleapis.com/Branch metrics: -
// library.googleapis.com/book/returned_count consumer_destinations: -
// monitored_resource: library.googleapis.com/Branch metrics: -
// library.googleapis.com/book/returned_count -
// library.googleapis.com/book/num_overdue
type Monitoring struct {
// ConsumerDestinations: Monitoring configurations for sending metrics
// to the consumer project. There can be multiple consumer destinations.
// A monitored resource type may appear in multiple monitoring
// destinations if different aggregations are needed for different sets
// of metrics associated with that monitored resource type. A monitored
// resource and metric pair may only be used once in the Monitoring
// configuration.
ConsumerDestinations []*MonitoringDestination `json:"consumerDestinations,omitempty"`
// ProducerDestinations: Monitoring configurations for sending metrics
// to the producer project. There can be multiple producer destinations.
// A monitored resource type may appear in multiple monitoring
// destinations if different aggregations are needed for different sets
// of metrics associated with that monitored resource type. A monitored
// resource and metric pair may only be used once in the Monitoring
// configuration.
ProducerDestinations []*MonitoringDestination `json:"producerDestinations,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "ConsumerDestinations") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ConsumerDestinations") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Monitoring) MarshalJSON() ([]byte, error) {
type NoMethod Monitoring
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
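// Illustrative sketch, not generated code: the monitoring
// configuration from the comment above, where returned_count goes to
// both projects and num_overdue only to the consumer project.
func exampleMonitoring() *Monitoring {
	return &Monitoring{
		ProducerDestinations: []*MonitoringDestination{{
			MonitoredResource: "library.googleapis.com/Branch",
			Metrics:           []string{"library.googleapis.com/book/returned_count"},
		}},
		ConsumerDestinations: []*MonitoringDestination{{
			MonitoredResource: "library.googleapis.com/Branch",
			Metrics: []string{
				"library.googleapis.com/book/returned_count",
				"library.googleapis.com/book/num_overdue",
			},
		}},
	}
}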
// MonitoringDestination: Configuration of a specific monitoring
// destination (the producer project or the consumer project).
type MonitoringDestination struct {
// Metrics: Types of the metrics to report to this monitoring
// destination. Each type must be defined in the Service.metrics section.
Metrics []string `json:"metrics,omitempty"`
// MonitoredResource: The monitored resource type. The type must be
// defined in the Service.monitored_resources section.
MonitoredResource string `json:"monitoredResource,omitempty"`
// ForceSendFields is a list of field names (e.g. "Metrics") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Metrics") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MonitoringDestination) MarshalJSON() ([]byte, error) {
type NoMethod MonitoringDestination
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// OAuthRequirements: OAuth scopes are a way to define data and
// permissions on data. For example, there are scopes defined for
// "Read-only access to Google Calendar" and "Access to Cloud Platform".
// Users can consent to a scope for an application, giving it permission
// to access that data on their behalf. OAuth scope specifications
// should be fairly coarse grained; a user will need to see and
// understand the text description of what your scope means. In most
// cases: use one or at most two OAuth scopes for an entire family of
// products. If your product has multiple APIs, you should probably be
// sharing the OAuth scope across all of those APIs. When you need finer
// grained OAuth consent screens: talk with your product management
// about how developers will use them in practice. Please note that even
// though each of the canonical scopes is enough for a request to be
// accepted and passed to the backend, a request can still fail due to
// the backend requiring additional scopes or permissions.
type OAuthRequirements struct {
// CanonicalScopes: The list of publicly documented OAuth scopes that
// are allowed access. An OAuth token containing any of these scopes
// will be accepted. Example: canonical_scopes:
// https://www.googleapis.com/auth/calendar,
// https://www.googleapis.com/auth/calendar.read
CanonicalScopes string `json:"canonicalScopes,omitempty"`
// ForceSendFields is a list of field names (e.g. "CanonicalScopes") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CanonicalScopes") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *OAuthRequirements) MarshalJSON() ([]byte, error) {
type NoMethod OAuthRequirements
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
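// Illustrative sketch, not generated code: a coarse-grained scope
// list. CanonicalScopes is a single comma-separated string, as in the
// example in the comment above.
func exampleOAuthRequirements() *OAuthRequirements {
	return &OAuthRequirements{
		CanonicalScopes: "https://www.googleapis.com/auth/calendar, " +
			"https://www.googleapis.com/auth/calendar.read",
	}
}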
// Operation: This resource represents a long-running operation that is
// the result of a network API call.
type Operation struct {
// Done: If the value is `false`, it means the operation is still in
// progress. If `true`, the operation is completed, and either `error`
// or `response` is available.
Done bool `json:"done,omitempty"`
// Error: The error result of the operation in case of failure or
// cancellation.
Error *Status `json:"error,omitempty"`
// Metadata: Service-specific metadata associated with the operation. It
// typically contains progress information and common metadata such as
// create time. Some services might not provide such metadata. Any
// method that returns a long-running operation should document the
// metadata type, if any.
Metadata googleapi.RawMessage `json:"metadata,omitempty"`
// Name: The server-assigned name, which is only unique within the same
// service that originally returns it. If you use the default HTTP
// mapping, the `name` should be a resource name ending with
// `operations/{unique_id}`.
Name string `json:"name,omitempty"`
// Response: The normal response of the operation in case of success. If
// the original method returns no data on success, such as `Delete`, the
// response is `google.protobuf.Empty`. If the original method is
// standard `Get`/`Create`/`Update`, the response should be the
// resource. For other methods, the response should have the type
// `XxxResponse`, where `Xxx` is the original method name. For example,
// if the original method name is `TakeSnapshot()`, the inferred
// response type is `TakeSnapshotResponse`.
Response googleapi.RawMessage `json:"response,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Done") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Done") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Operation) MarshalJSON() ([]byte, error) {
type NoMethod Operation
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
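// Illustrative sketch, not generated code: inspecting a completed
// Operation. Once Done is true, exactly one of Error and Response is
// populated; Response stays raw JSON until the caller unmarshals it
// into the documented response type.
func operationOutcome(op *Operation) (googleapi.RawMessage, *Status, bool) {
	if op == nil || !op.Done {
		// Still in progress; the caller should poll again later.
		return nil, nil, false
	}
	if op.Error != nil {
		return nil, op.Error, true
	}
	return op.Response, nil, true
}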
// OperationInfo: A message representing the message types used by a
// long-running operation. Example: rpc Export(ExportRequest) returns
// (google.longrunning.Operation) { option
// (google.longrunning.operation_info) = { response_type:
// "ExportResponse" metadata_type: "ExportMetadata" }; }
type OperationInfo struct {
// MetadataType: Required. The message name of the metadata type for
// this long-running operation. If the response is in a different
// package from the rpc, a fully-qualified message name must be used
// (e.g. `google.protobuf.Struct`). Note: Altering this value
// constitutes a breaking change.
MetadataType string `json:"metadataType,omitempty"`
// ResponseType: Required. The message name of the primary return type
// for this long-running operation. This type will be used to
// deserialize the LRO's response. If the response is in a different
// package from the rpc, a fully-qualified message name must be used
// (e.g. `google.protobuf.Struct`). Note: Altering this value
// constitutes a breaking change.
ResponseType string `json:"responseType,omitempty"`
// ForceSendFields is a list of field names (e.g. "MetadataType") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MetadataType") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *OperationInfo) MarshalJSON() ([]byte, error) {
type NoMethod OperationInfo
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// OperationMetadata: The metadata associated with a long running
// operation resource.
type OperationMetadata struct {
// ProgressPercentage: Percentage of completion of this operation,
// ranging from 0 to 100.
ProgressPercentage int64 `json:"progressPercentage,omitempty"`
// ResourceNames: The full name of the resources that this operation is
// directly associated with.
ResourceNames []string `json:"resourceNames,omitempty"`
// StartTime: The start time of the operation.
StartTime string `json:"startTime,omitempty"`
// Steps: Detailed status information for each step. The order is
// undetermined.
Steps []*Step `json:"steps,omitempty"`
// ForceSendFields is a list of field names (e.g. "ProgressPercentage")
// to unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ProgressPercentage") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *OperationMetadata) MarshalJSON() ([]byte, error) {
type NoMethod OperationMetadata
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Option: A protocol buffer option, which can be attached to a message,
// field, enumeration, etc.
type Option struct {
// Name: The option's name. For protobuf built-in options (options
// defined in descriptor.proto), this is the short name. For example,
// "map_entry". For custom options, it should be the fully-qualified
// name. For example, "google.api.http".
Name string `json:"name,omitempty"`
// Value: The option's value packed in an Any message. If the value is a
// primitive, the corresponding wrapper type defined in
// google/protobuf/wrappers.proto should be used. If the value is an
// enum, it should be stored as an int32 value using the
// google.protobuf.Int32Value type.
Value googleapi.RawMessage `json:"value,omitempty"`
// ForceSendFields is a list of field names (e.g. "Name") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Name") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Option) MarshalJSON() ([]byte, error) {
type NoMethod Option
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Page: Represents a documentation page. A page can contain subpages to
// represent nested documentation set structure.
type Page struct {
// Content: The Markdown content of the page. You can use (== include
// {path} ==) to include content from a Markdown file. The content can
// be used to produce the documentation page, such as an HTML format page.
Content string `json:"content,omitempty"`
// Name: The name of the page. It is used as an identifier for the page
// when generating its URI, the text of the link to this page in
// navigation, etc. The full page name (from the root page name to this
// page, concatenated with `.`) can be used as a reference to the page
// in your documentation. For example: pages: - name: Tutorial content:
// (== include tutorial.md ==) subpages: - name: Java content: (==
// include tutorial_java.md ==) You can reference the `Java` page using
// Markdown reference link syntax: `Java`.
Name string `json:"name,omitempty"`
// Subpages: Subpages of this page. The order of subpages specified here
// will be honored in the generated docset.
Subpages []*Page `json:"subpages,omitempty"`
// ForceSendFields is a list of field names (e.g. "Content") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Content") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Page) MarshalJSON() ([]byte, error) {
type NoMethod Page
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
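// Illustrative sketch, not generated code: the nested page layout from
// the comment above, with a Tutorial page and a Java subpage whose
// order is preserved in the generated docset.
func examplePages() []*Page {
	return []*Page{{
		Name:    "Tutorial",
		Content: "(== include tutorial.md ==)",
		Subpages: []*Page{{
			Name:    "Java",
			Content: "(== include tutorial_java.md ==)",
		}},
	}}
}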
// Policy: An Identity and Access Management (IAM) policy, which
// specifies access controls for Google Cloud resources. A `Policy` is a
// collection of `bindings`. A `binding` binds one or more `members` to
// a single `role`. Members can be user accounts, service accounts,
// Google groups, and domains (such as G Suite). A `role` is a named
// list of permissions; each `role` can be an IAM predefined role or a
// user-created custom role. For some types of Google Cloud resources, a
// `binding` can also specify a `condition`, which is a logical
// expression that allows access to a resource only if the expression
// evaluates to `true`. A condition can add constraints based on
// attributes of the request, the resource, or both. To learn which
// resources support conditions in their IAM policies, see the IAM
// documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
// **JSON example:** { "bindings": [ { "role":
// "roles/resourcemanager.organizationAdmin", "members": [
// "user:[email protected]", "group:[email protected]",
// "domain:google.com",
// "serviceAccount:[email protected]" ] }, {
// "role": "roles/resourcemanager.organizationViewer", "members": [
// "user:[email protected]" ], "condition": { "title": "expirable access",
// "description": "Does not grant access after Sep 2020", "expression":
// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ],
// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: -
// members: - user:[email protected] - group:[email protected] -
// domain:google.com -
// serviceAccount:[email protected] role:
// roles/resourcemanager.organizationAdmin - members: -
// user:[email protected] role: roles/resourcemanager.organizationViewer
// condition: title: expirable access description: Does not grant access
// after Sep 2020 expression: request.time <
// timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3
// For a description of IAM and its features, see the IAM documentation
// (https://cloud.google.com/iam/docs/).
type Policy struct {
// AuditConfigs: Specifies cloud audit logging configuration for this
// policy.
AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"`
// Bindings: Associates a list of `members` to a `role`. Optionally, may
// specify a `condition` that determines how and when the `bindings` are
// applied. Each of the `bindings` must contain at least one member.
Bindings []*Binding `json:"bindings,omitempty"`
// Etag: `etag` is used for optimistic concurrency control as a way to
// help prevent simultaneous updates of a policy from overwriting each
// other. It is strongly suggested that systems make use of the `etag`
// in the read-modify-write cycle to perform policy updates in order to
// avoid race conditions: An `etag` is returned in the response to
// `getIamPolicy`, and systems are expected to put that etag in the
// request to `setIamPolicy` to ensure that their change will be applied
// to the same version of the policy. **Important:** If you use IAM
// Conditions, you must include the `etag` field whenever you call
// `setIamPolicy`. If you omit this field, then IAM allows you to
// overwrite a version `3` policy with a version `1` policy, and all of
// the conditions in the version `3` policy are lost.
Etag string `json:"etag,omitempty"`
// Version: Specifies the format of the policy. Valid values are `0`,
// `1`, and `3`. Requests that specify an invalid value are rejected.
// Any operation that affects conditional role bindings must specify
// version `3`. This requirement applies to the following operations: *
// Getting a policy that includes a conditional role binding * Adding a
// conditional role binding to a policy * Changing a conditional role
// binding in a policy * Removing any role binding, with or without a
// condition, from a policy that includes conditions **Important:** If
// you use IAM Conditions, you must include the `etag` field whenever
// you call `setIamPolicy`. If you omit this field, then IAM allows you
// to overwrite a version `3` policy with a version `1` policy, and all
// of the conditions in the version `3` policy are lost. If a policy
// does not include any conditions, operations on that policy may
// specify any valid version or leave the field unset. To learn which
// resources support conditions in their IAM policies, see the IAM
// documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
Version int64 `json:"version,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "AuditConfigs") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AuditConfigs") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Policy) MarshalJSON() ([]byte, error) {
type NoMethod Policy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
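// Illustrative sketch, not generated code: the read-modify-write cycle
// described above. The fetched policy's Etag is deliberately left in
// place so that a concurrent update makes setIamPolicy fail instead of
// silently overwriting it. The role and member are hypothetical.
func addViewerBinding(fetched *Policy, member string) *Policy {
	fetched.Bindings = append(fetched.Bindings, &Binding{
		Role:    "roles/resourcemanager.organizationViewer",
		Members: []string{member},
	})
	// Conditional bindings would also require Version to be 3; see the
	// Version field comment above.
	return fetched
}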
// Quota: Quota configuration helps to achieve fairness and budgeting in
// service usage. The metric based quota configuration works this way: -
// The service configuration defines a set of metrics. - For API calls,
// the quota.metric_rules maps methods to metrics with corresponding
// costs. - The quota.limits defines limits on the metrics, which will
// be used for quota checks at runtime. An example quota configuration
// in yaml format: quota: limits: - name: apiWriteQpsPerProject metric:
// library.googleapis.com/write_calls unit: "1/min/{project}" # rate
// limit for consumer projects values: STANDARD: 10000 # The metric
// rules bind all methods to the read_calls metric, # except for the
// UpdateBook and DeleteBook methods. These two methods # are mapped to
// the write_calls metric, with the UpdateBook method # consuming at
// twice the rate of the DeleteBook method. metric_rules: - selector: "*"
// metric_costs: library.googleapis.com/read_calls: 1 - selector:
// google.example.library.v1.LibraryService.UpdateBook metric_costs:
// library.googleapis.com/write_calls: 2 - selector:
// google.example.library.v1.LibraryService.DeleteBook metric_costs:
// library.googleapis.com/write_calls: 1 Corresponding Metric
// definition: metrics: - name: library.googleapis.com/read_calls
// display_name: Read requests metric_kind: DELTA value_type: INT64 -
// name: library.googleapis.com/write_calls display_name: Write requests
// metric_kind: DELTA value_type: INT64
type Quota struct {
// Limits: List of `QuotaLimit` definitions for the service.
Limits []*QuotaLimit `json:"limits,omitempty"`
// MetricRules: List of `MetricRule` definitions, each one mapping a
// selected method to one or more metrics.
MetricRules []*MetricRule `json:"metricRules,omitempty"`
// ForceSendFields is a list of field names (e.g. "Limits") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Limits") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Quota) MarshalJSON() ([]byte, error) {
type NoMethod Quota
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
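// Illustrative sketch, not generated code: the YAML quota
// configuration from the comment above expressed as structs. The limit
// gives each consumer project 10000 write calls per minute; the rule
// charges every method one read_call.
func exampleQuota() *Quota {
	return &Quota{
		Limits: []*QuotaLimit{{
			Name:   "apiWriteQpsPerProject",
			Metric: "library.googleapis.com/write_calls",
			Unit:   "1/min/{project}",
			Values: map[string]string{"STANDARD": "10000"},
		}},
		MetricRules: []*MetricRule{{
			Selector:    "*",
			MetricCosts: map[string]string{"library.googleapis.com/read_calls": "1"},
		}},
	}
}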
// QuotaLimit: `QuotaLimit` defines a specific limit that applies over a
// specified duration for a limit type. There can be at most one limit
// for a duration and limit type combination defined within a
// `QuotaGroup`.
type QuotaLimit struct {
// DefaultLimit: Default number of tokens that can be consumed during
// the specified duration. This is the number of tokens assigned when a
// client application developer activates the service for their
// project. Specifying a value of 0 will block all requests. This can be
// used if you are provisioning quota to selected consumers and blocking
// others. Similarly, a value of -1 will indicate an unlimited quota. No
// other negative values are allowed. Used by group-based quotas only.
DefaultLimit int64 `json:"defaultLimit,omitempty,string"`
// Description: Optional. User-visible, extended description for this
// quota limit. Should be used only when more context is needed to
// understand this limit than provided by the limit's display name (see:
// `display_name`).
Description string `json:"description,omitempty"`
// DisplayName: User-visible display name for this limit. Optional. If
// not set, the UI will provide a default display name based on the
// quota configuration. This field can be used to override the default
// display name generated from the configuration.
DisplayName string `json:"displayName,omitempty"`
// Duration: Duration of this limit in textual notation. Must be "100s"
// or "1d". Used by group-based quotas only.
Duration string `json:"duration,omitempty"`
// FreeTier: Free tier value displayed in the Developers Console for
// this limit. The free tier is the number of tokens that will be
// subtracted from the billed amount when billing is enabled. This field
// can only be set on a limit with duration "1d", in a billable group;
// it is invalid on any other limit. If this field is not set, it
// defaults to 0, indicating that there is no free tier for this
// service. Used by group-based quotas only.
FreeTier int64 `json:"freeTier,omitempty,string"`
// MaxLimit: Maximum number of tokens that can be consumed during the
// specified duration. Client application developers can override the
// default limit up to this maximum. If specified, this value cannot be
// set to a value less than the default limit. If not specified, it is
// set to the default limit. To allow clients to apply overrides with no
// upper bound, set this to -1, indicating unlimited maximum quota. Used
// by group-based quotas only.
MaxLimit int64 `json:"maxLimit,omitempty,string"`
// Metric: The name of the metric this quota limit applies to. The quota
// limits with the same metric will be checked together during runtime.
// The metric must be defined within the service config.
Metric string `json:"metric,omitempty"`
// Name: Name of the quota limit. The name must be provided, and it must
// be unique within the service. The name can only include alphanumeric
// characters as well as '-'. The maximum length of the limit name is 64
// characters.
Name string `json:"name,omitempty"`
// Unit: Specify the unit of the quota limit. It uses the same syntax as
// Metric.unit. The supported unit kinds are determined by the quota
// backend system. Here are some examples: * "1/min/{project}" for quota
// per minute per project. Note: the order of unit components is
// insignificant. The "1" at the beginning is required to follow the
// metric unit syntax.
Unit string `json:"unit,omitempty"`
// Values: Tiered limit values. You must specify this as a key:value
// pair, with an integer value that is the maximum number of requests
// allowed for the specified unit. Currently only STANDARD is supported.
Values map[string]string `json:"values,omitempty"`
// ForceSendFields is a list of field names (e.g. "DefaultLimit") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DefaultLimit") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *QuotaLimit) MarshalJSON() ([]byte, error) {
type NoMethod QuotaLimit
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
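// Editor's note: an illustrative group-based QuotaLimit consistent with the
// field docs above; all values are hypothetical. The duration must be "100s"
// or "1d", and MaxLimit caps any per-consumer override of DefaultLimit.
func exampleQuotaLimit() *QuotaLimit {
	return &QuotaLimit{
		Name:         "read-requests-per-minute",
		Metric:       "example.googleapis.com/read_requests",
		Unit:         "1/min/{project}",
		Duration:     "100s",
		DefaultLimit: 100,  // tokens granted when a consumer activates the service
		MaxLimit:     1000, // ceiling for consumer overrides; -1 would mean unlimited
		Values:       map[string]string{"STANDARD": "100"},
	}
}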
// ResourceReference: Defines a proto annotation that describes a string
// field that refers to an API resource.
type ResourceReference struct {
// ChildType: The resource type of a child collection that the annotated
// field references. This is useful for annotating the `parent` field
// that doesn't have a fixed resource type. Example: message
// ListLogEntriesRequest { string parent = 1
// [(google.api.resource_reference) = { child_type:
// "logging.googleapis.com/LogEntry" }; }
ChildType string `json:"childType,omitempty"`
// Type: The resource type that the annotated field references. Example:
// message Subscription { string topic = 2
// [(google.api.resource_reference) = { type:
// "pubsub.googleapis.com/Topic" }]; } Occasionally, a field may
// reference an arbitrary resource. In this case, APIs use the special
// value * in their resource reference. Example: message
// GetIamPolicyRequest { string resource = 2
// [(google.api.resource_reference) = { type: "*" }]; }
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "ChildType") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ChildType") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ResourceReference) MarshalJSON() ([]byte, error) {
type NoMethod ResourceReference
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Rollout: A rollout resource that defines how service configuration
// versions are pushed to control plane systems. Typically, you create a
// new version of the service config, and then create a Rollout to push
// the service config.
type Rollout struct {
// CreateTime: Creation time of the rollout. Readonly.
CreateTime string `json:"createTime,omitempty"`
// CreatedBy: This field is deprecated and will be deleted. Please
// remove usage of this field.
CreatedBy string `json:"createdBy,omitempty"`
// DeleteServiceStrategy: The strategy associated with a rollout to
// delete a `ManagedService`. Readonly.
DeleteServiceStrategy *DeleteServiceStrategy `json:"deleteServiceStrategy,omitempty"`
// RolloutId: Optional. Unique identifier of this Rollout. Must be no
// longer than 63 characters and only lower case letters, digits, '.',
// '_' and '-' are allowed. If not specified by client, the server will
// generate one. The generated id will have the form of
// "<date><revision number>", where "date" is the create date in ISO
// 8601 format and "revision number" is a monotonically increasing
// positive number that is reset every day for each service. An example
// of a generated rollout_id is '2016-02-16r1'.
RolloutId string `json:"rolloutId,omitempty"`
// ServiceName: The name of the service associated with this Rollout.
ServiceName string `json:"serviceName,omitempty"`
// Status: The status of this rollout. Readonly. In case of a failed
// rollout, the system will automatically roll back to the current
// Rollout version.
//
// Possible values:
// "ROLLOUT_STATUS_UNSPECIFIED" - No status specified.
// "IN_PROGRESS" - The Rollout is in progress.
// "SUCCESS" - The Rollout has completed successfully.
// "CANCELLED" - The Rollout has been cancelled. This can happen if
// you have overlapping Rollout pushes, and the previous ones will be
// cancelled.
// "FAILED" - The Rollout has failed and the rollback attempt has
// failed too.
// "PENDING" - The Rollout has not started yet and is pending for
// execution.
// "FAILED_ROLLED_BACK" - The Rollout has failed and rolled back to
// the previous successful Rollout.
Status string `json:"status,omitempty"`
// TrafficPercentStrategy: Google Service Control selects service
// configurations based on traffic percentage.
TrafficPercentStrategy *TrafficPercentStrategy `json:"trafficPercentStrategy,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "CreateTime") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CreateTime") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Rollout) MarshalJSON() ([]byte, error) {
type NoMethod Rollout
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Service: `Service` is the root object of Google API service
// configuration (service config). It describes the basic information
// about a logical service, such as the service name and the user-facing
// title, and delegates other aspects to sub-sections. Each sub-section
// is either a proto message or a repeated proto message that configures
// a specific aspect, such as auth. For more information, see each proto
// message definition. Example: type: google.api.Service name:
// calendar.googleapis.com title: Google Calendar API apis: - name:
// google.calendar.v3.Calendar visibility: rules: - selector:
// "google.calendar.v3.*" restriction: PREVIEW backend: rules: -
// selector: "google.calendar.v3.*" address: calendar.example.com
// authentication: providers: - id: google_calendar_auth jwks_uri:
// https://www.googleapis.com/oauth2/v1/certs issuer:
// https://securetoken.google.com rules: - selector: "*" requirements:
// provider_id: google_calendar_auth
type Service struct {
// Apis: A list of API interfaces exported by this service. Only the
// `name` field of the google.protobuf.Api needs to be provided by the
// configuration author, as the remaining fields will be derived from
// the IDL during the normalization process. It is an error to specify
// an API interface here which cannot be resolved against the associated
// IDL files.
Apis []*Api `json:"apis,omitempty"`
// Authentication: Auth configuration.
Authentication *Authentication `json:"authentication,omitempty"`
// Backend: API backend configuration.
Backend *Backend `json:"backend,omitempty"`
// Billing: Billing configuration.
Billing *Billing `json:"billing,omitempty"`
// ConfigVersion: Obsolete. Do not use. This field has no semantic
// meaning. The service config compiler always sets this field to `3`.
ConfigVersion int64 `json:"configVersion,omitempty"`
// Context: Context configuration.
Context *Context `json:"context,omitempty"`
// Control: Configuration for the service control plane.
Control *Control `json:"control,omitempty"`
// CustomError: Custom error configuration.
CustomError *CustomError `json:"customError,omitempty"`
// Documentation: Additional API documentation.
Documentation *Documentation `json:"documentation,omitempty"`
// Endpoints: Configuration for network endpoints. If this is empty,
// then an endpoint with the same name as the service is automatically
// generated to service all defined APIs.
Endpoints []*Endpoint `json:"endpoints,omitempty"`
// Enums: A list of all enum types included in this API service. Enums
// referenced directly or indirectly by the `apis` are automatically
// included. Enums which are not referenced but shall be included should
// be listed here by name by the configuration author. Example: enums: -
// name: google.someapi.v1.SomeEnum
Enums []*Enum `json:"enums,omitempty"`
// Http: HTTP configuration.
Http *Http `json:"http,omitempty"`
// Id: A unique ID for a specific instance of this message, typically
// assigned by the client for tracking purpose. Must be no longer than
// 63 characters and only lower case letters, digits, '.', '_' and '-'
// are allowed. If empty, the server may choose to generate one instead.
Id string `json:"id,omitempty"`
// Logging: Logging configuration.
Logging *Logging `json:"logging,omitempty"`
// Logs: Defines the logs used by this service.
Logs []*LogDescriptor `json:"logs,omitempty"`
// Metrics: Defines the metrics used by this service.
Metrics []*MetricDescriptor `json:"metrics,omitempty"`
// MonitoredResources: Defines the monitored resources used by this
// service. This is required by the Service.monitoring and
// Service.logging configurations.
MonitoredResources []*MonitoredResourceDescriptor `json:"monitoredResources,omitempty"`
// Monitoring: Monitoring configuration.
Monitoring *Monitoring `json:"monitoring,omitempty"`
// Name: The service name, which is a DNS-like logical identifier for
// the service, such as `calendar.googleapis.com`. The service name
// typically goes through DNS verification to make sure the owner of the
// service also owns the DNS name.
Name string `json:"name,omitempty"`
// ProducerProjectId: The Google project that owns this service.
ProducerProjectId string `json:"producerProjectId,omitempty"`
// Quota: Quota configuration.
Quota *Quota `json:"quota,omitempty"`
// SourceInfo: Output only. The source information for this
// configuration if available.
SourceInfo *SourceInfo `json:"sourceInfo,omitempty"`
// SystemParameters: System parameter configuration.
SystemParameters *SystemParameters `json:"systemParameters,omitempty"`
// SystemTypes: A list of all proto message types included in this API
// service. It serves a similar purpose to [google.api.Service.types],
// except that these types are not needed by user-defined APIs.
// Therefore, they will not show up in the generated discovery doc. This
// field should only be used to define system APIs in ESF.
SystemTypes []*Type `json:"systemTypes,omitempty"`
// Title: The product title for this service; it is the name displayed
// in Google Cloud Console.
Title string `json:"title,omitempty"`
// Types: A list of all proto message types included in this API
// service. Types referenced directly or indirectly by the `apis` are
// automatically included. Messages which are not referenced but shall
// be included, such as types used by the `google.protobuf.Any` type,
// should be listed here by name by the configuration author. Example:
// types: - name: google.protobuf.Int32
Types []*Type `json:"types,omitempty"`
// Usage: Configuration controlling usage of this service.
Usage *Usage `json:"usage,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Apis") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Apis") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Service) MarshalJSON() ([]byte, error) {
type NoMethod Service
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
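// Editor's note: the YAML example in the Service doc comment, sketched as a
// Go literal. Only the fields shown there are set; the visibility section is
// omitted because it has no counterpart in this struct. Names are illustrative.
func exampleServiceConfig() *Service {
	return &Service{
		Name:  "calendar.googleapis.com",
		Title: "Google Calendar API",
		Apis:  []*Api{{Name: "google.calendar.v3.Calendar"}},
		Backend: &Backend{
			Rules: []*BackendRule{{
				Selector: "google.calendar.v3.*",
				Address:  "calendar.example.com",
			}},
		},
		Authentication: &Authentication{
			Providers: []*AuthProvider{{
				Id:      "google_calendar_auth",
				JwksUri: "https://www.googleapis.com/oauth2/v1/certs",
				Issuer:  "https://securetoken.google.com",
			}},
			Rules: []*AuthenticationRule{{
				Selector:     "*",
				Requirements: []*AuthRequirement{{ProviderId: "google_calendar_auth"}},
			}},
		},
	}
}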
// SetIamPolicyRequest: Request message for `SetIamPolicy` method.
type SetIamPolicyRequest struct {
// Policy: REQUIRED: The complete policy to be applied to the
// `resource`. The size of the policy is limited to a few 10s of KB. An
// empty policy is a valid policy but certain Cloud Platform services
// (such as Projects) might reject them.
Policy *Policy `json:"policy,omitempty"`
// UpdateMask: OPTIONAL: A FieldMask specifying which fields of the
// policy to modify. Only the fields in the mask will be modified. If no
// mask is provided, the following default mask is used: `paths:
// "bindings, etag"`.
UpdateMask string `json:"updateMask,omitempty"`
// ForceSendFields is a list of field names (e.g. "Policy") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Policy") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) {
type NoMethod SetIamPolicyRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
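// Editor's note: a hypothetical SetIamPolicyRequest that modifies only the
// fields named by the default mask documented above. In JSON a FieldMask is
// written as comma-separated paths, so "bindings,etag" is assumed here.
func exampleSetIamPolicyRequest(p *Policy) *SetIamPolicyRequest {
	return &SetIamPolicyRequest{
		Policy:     p, // typically read back from a prior GetIamPolicy call
		UpdateMask: "bindings,etag",
	}
}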
// SourceContext: `SourceContext` represents information about the
// source of a protobuf element, like the file in which it is defined.
type SourceContext struct {
// FileName: The path-qualified name of the .proto file that contained
// the associated protobuf element. For example:
// "google/protobuf/source_context.proto".
FileName string `json:"fileName,omitempty"`
// ForceSendFields is a list of field names (e.g. "FileName") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "FileName") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SourceContext) MarshalJSON() ([]byte, error) {
type NoMethod SourceContext
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SourceInfo: Source information used to create a Service Config
type SourceInfo struct {
// SourceFiles: All files used during config generation.
SourceFiles []googleapi.RawMessage `json:"sourceFiles,omitempty"`
// ForceSendFields is a list of field names (e.g. "SourceFiles") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "SourceFiles") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SourceInfo) MarshalJSON() ([]byte, error) {
type NoMethod SourceInfo
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Status: The `Status` type defines a logical error model that is
// suitable for different programming environments, including REST APIs
// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each
// `Status` message contains three pieces of data: error code, error
// message, and error details. You can find out more about this error
// model and how to work with it in the API Design Guide
// (https://cloud.google.com/apis/design/errors).
type Status struct {
// Code: The status code, which should be an enum value of
// google.rpc.Code.
Code int64 `json:"code,omitempty"`
// Details: A list of messages that carry the error details. There is a
// common set of message types for APIs to use.
Details []googleapi.RawMessage `json:"details,omitempty"`
// Message: A developer-facing error message, which should be in
// English. Any user-facing error message should be localized and sent
// in the google.rpc.Status.details field, or localized by the client.
Message string `json:"message,omitempty"`
// ForceSendFields is a list of field names (e.g. "Code") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Code") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Status) MarshalJSON() ([]byte, error) {
type NoMethod Status
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Step: Represents the status of one operation step.
type Step struct {
// Description: The short description of the step.
Description string `json:"description,omitempty"`
// Status: The status code.
//
// Possible values:
// "STATUS_UNSPECIFIED" - Unspecifed code.
// "DONE" - The operation or step has completed without errors.
// "NOT_STARTED" - The operation or step has not started yet.
// "IN_PROGRESS" - The operation or step is in progress.
// "FAILED" - The operation or step has completed with errors. If the
// operation is rollbackable, the rollback completed with errors too.
// "CANCELLED" - The operation or step has completed with
// cancellation.
Status string `json:"status,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Step) MarshalJSON() ([]byte, error) {
type NoMethod Step
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SubmitConfigSourceRequest: Request message for SubmitConfigSource
// method.
type SubmitConfigSourceRequest struct {
// ConfigSource: Required. The source configuration for the service.
ConfigSource *ConfigSource `json:"configSource,omitempty"`
// ValidateOnly: Optional. If set, this will result in the generation of
// a `google.api.Service` configuration based on the `ConfigSource`
// provided, but the generated config and the sources will NOT be
// persisted.
ValidateOnly bool `json:"validateOnly,omitempty"`
// ForceSendFields is a list of field names (e.g. "ConfigSource") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ConfigSource") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SubmitConfigSourceRequest) MarshalJSON() ([]byte, error) {
type NoMethod SubmitConfigSourceRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SubmitConfigSourceResponse: Response message for SubmitConfigSource
// method.
type SubmitConfigSourceResponse struct {
// ServiceConfig: The generated service configuration.
ServiceConfig *Service `json:"serviceConfig,omitempty"`
// ForceSendFields is a list of field names (e.g. "ServiceConfig") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ServiceConfig") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SubmitConfigSourceResponse) MarshalJSON() ([]byte, error) {
type NoMethod SubmitConfigSourceResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SystemParameter: Define a parameter's name and location. The
// parameter may be passed as either an HTTP header or a URL query
// parameter, and if both are passed the behavior is
// implementation-dependent.
type SystemParameter struct {
// HttpHeader: Define the HTTP header name to use for the parameter. It
// is case insensitive.
HttpHeader string `json:"httpHeader,omitempty"`
// Name: Define the name of the parameter, such as "api_key". It is
// case sensitive.
Name string `json:"name,omitempty"`
// UrlQueryParameter: Define the URL query parameter name to use for the
// parameter. It is case sensitive.
UrlQueryParameter string `json:"urlQueryParameter,omitempty"`
// ForceSendFields is a list of field names (e.g. "HttpHeader") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "HttpHeader") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SystemParameter) MarshalJSON() ([]byte, error) {
type NoMethod SystemParameter
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SystemParameterRule: Define a system parameter rule mapping system
// parameter definitions to methods.
type SystemParameterRule struct {
// Parameters: Define parameters. Multiple names may be defined for a
// parameter. For a given method call, only one of them should be used.
// If multiple names are used the behavior is implementation-dependent.
// If none of the specified names are present the behavior is
// parameter-dependent.
Parameters []*SystemParameter `json:"parameters,omitempty"`
// Selector: Selects the methods to which this rule applies. Use '*' to
// indicate all methods in all APIs. Refer to selector for syntax
// details.
Selector string `json:"selector,omitempty"`
// ForceSendFields is a list of field names (e.g. "Parameters") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Parameters") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SystemParameterRule) MarshalJSON() ([]byte, error) {
type NoMethod SystemParameterRule
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SystemParameters: System parameter configuration. A system
// parameter is a special kind of parameter defined by the API system,
// not by an individual API. It is typically mapped to an HTTP header
// and/or a URL query parameter. This configuration specifies which
// methods change the names of the system parameters.
type SystemParameters struct {
// Rules: Define system parameters. The parameters defined here will
// override the default parameters implemented by the system. If this
// field is missing from the service config, default system parameters
// will be used. Default system parameters and names are
// implementation-dependent. Example: define api key for all methods
// system_parameters rules: - selector: "*" parameters: - name: api_key
// url_query_parameter: api_key Example: define 2 api key names for a
// specific method. system_parameters rules: - selector: "/ListShelves"
// parameters: - name: api_key http_header: Api-Key1 - name: api_key
// http_header: Api-Key2 **NOTE:** All service configuration rules
// follow "last one wins" order.
Rules []*SystemParameterRule `json:"rules,omitempty"`
// ForceSendFields is a list of field names (e.g. "Rules") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Rules") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SystemParameters) MarshalJSON() ([]byte, error) {
type NoMethod SystemParameters
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
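// Editor's note: the first example in the SystemParameters doc comment (an
// api_key URL query parameter for all methods), sketched as a Go literal.
func exampleSystemParameters() *SystemParameters {
	return &SystemParameters{
		Rules: []*SystemParameterRule{{
			Selector: "*",
			Parameters: []*SystemParameter{{
				Name:              "api_key",
				UrlQueryParameter: "api_key",
			}},
		}},
	}
}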
// TestIamPermissionsRequest: Request message for `TestIamPermissions`
// method.
type TestIamPermissionsRequest struct {
// Permissions: The set of permissions to check for the `resource`.
// Permissions with wildcards (such as '*' or 'storage.*') are not
// allowed. For more information see IAM Overview
// (https://cloud.google.com/iam/docs/overview#permissions).
Permissions []string `json:"permissions,omitempty"`
// ForceSendFields is a list of field names (e.g. "Permissions") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Permissions") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) {
type NoMethod TestIamPermissionsRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TestIamPermissionsResponse: Response message for `TestIamPermissions`
// method.
type TestIamPermissionsResponse struct {
// Permissions: A subset of `TestPermissionsRequest.permissions` that
// the caller is allowed.
Permissions []string `json:"permissions,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Permissions") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Permissions") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) {
type NoMethod TestIamPermissionsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TrafficPercentStrategy: Strategy that specifies how clients of Google
// Service Controller want to send traffic to use different config
// versions. This is generally used by API proxy to split traffic based
// on your configured percentage for each config version. One example of
// how to gradually rollout a new service configuration using this
// strategy: Day 1 Rollout { id:
// "example.googleapis.com/rollout_20160206" traffic_percent_strategy {
// percentages: { "example.googleapis.com/20160201": 70.00
// "example.googleapis.com/20160206": 30.00 } } } Day 2 Rollout { id:
// "example.googleapis.com/rollout_20160207" traffic_percent_strategy: {
// percentages: { "example.googleapis.com/20160206": 100.00 } } }
type TrafficPercentStrategy struct {
// Percentages: Maps service configuration IDs to their corresponding
// traffic percentage. Key is the service configuration ID, Value is the
// traffic percentage, which must be greater than 0.0, and the
// percentages must sum to 100.0.
Percentages map[string]float64 `json:"percentages,omitempty"`
// ForceSendFields is a list of field names (e.g. "Percentages") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Percentages") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TrafficPercentStrategy) MarshalJSON() ([]byte, error) {
type NoMethod TrafficPercentStrategy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
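// Editor's note: the "Day 1" rollout from the TrafficPercentStrategy doc
// comment as a Rollout literal, keeping 70% of traffic on the old config id
// while 30% moves to the new one. The ids are the doc's hypothetical ones.
func exampleGradualRollout() *Rollout {
	return &Rollout{
		RolloutId:   "example.googleapis.com/rollout_20160206",
		ServiceName: "example.googleapis.com",
		TrafficPercentStrategy: &TrafficPercentStrategy{
			Percentages: map[string]float64{
				"example.googleapis.com/20160201": 70.0,
				"example.googleapis.com/20160206": 30.0, // values must sum to 100.0
			},
		},
	}
}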
// Type: A protocol buffer message type.
type Type struct {
// Fields: The list of fields.
Fields []*Field `json:"fields,omitempty"`
// Name: The fully qualified message name.
Name string `json:"name,omitempty"`
// Oneofs: The list of types appearing in `oneof` definitions in this
// type.
Oneofs []string `json:"oneofs,omitempty"`
// Options: The protocol buffer options.
Options []*Option `json:"options,omitempty"`
// SourceContext: The source context.
SourceContext *SourceContext `json:"sourceContext,omitempty"`
// Syntax: The source syntax.
//
// Possible values:
// "SYNTAX_PROTO2" - Syntax `proto2`.
// "SYNTAX_PROTO3" - Syntax `proto3`.
Syntax string `json:"syntax,omitempty"`
// ForceSendFields is a list of field names (e.g. "Fields") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Fields") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Type) MarshalJSON() ([]byte, error) {
type NoMethod Type
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// UndeleteServiceResponse: Response message for UndeleteService method.
type UndeleteServiceResponse struct {
// Service: Revived service resource.
Service *ManagedService `json:"service,omitempty"`
// ForceSendFields is a list of field names (e.g. "Service") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Service") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *UndeleteServiceResponse) MarshalJSON() ([]byte, error) {
type NoMethod UndeleteServiceResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Usage: Configuration controlling usage of a service.
type Usage struct {
// ProducerNotificationChannel: The full resource name of a channel used
// for sending notifications to the service producer. Google Service
// Management currently only supports Google Cloud Pub/Sub
// (https://cloud.google.com/pubsub) as a notification channel. To use
// Google Cloud Pub/Sub as the channel, this must be the name of a Cloud
// Pub/Sub topic that uses the Cloud Pub/Sub topic name format
// documented in https://cloud.google.com/pubsub/docs/overview.
ProducerNotificationChannel string `json:"producerNotificationChannel,omitempty"`
// Requirements: Requirements that must be satisfied before a consumer
// project can use the service. Each requirement is of the form
// <service.name>/<requirement-id>; for
// example 'serviceusage.googleapis.com/billing-enabled'. For Google
// APIs, a Terms of Service requirement must be included here. Google
// Cloud APIs must include "serviceusage.googleapis.com/tos/cloud".
// Other Google APIs should include
// "serviceusage.googleapis.com/tos/universal". Additional ToS can be
// included based on the business needs.
Requirements []string `json:"requirements,omitempty"`
// Rules: A list of usage rules that apply to individual API methods.
// **NOTE:** All service configuration rules follow "last one wins"
// order.
Rules []*UsageRule `json:"rules,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "ProducerNotificationChannel") to unconditionally include in API
// requests. By default, fields with empty or default values are omitted
// from API requests. However, any non-pointer, non-interface field
// appearing in ForceSendFields will be sent to the server regardless of
// whether the field is empty or not. This may be used to include empty
// fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g.
// "ProducerNotificationChannel") to include in API requests with the
// JSON null value. By default, fields with empty values are omitted
// from API requests. However, any field with an empty value appearing
// in NullFields will be sent to the server as null. It is an error if a
// field in this list has a non-empty value. This may be used to include
// null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Usage) MarshalJSON() ([]byte, error) {
type NoMethod Usage
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// UsageRule: Usage configuration rules for the service. NOTE: Under
// development. Use this rule to configure unregistered calls for the
// service. Unregistered calls are calls that do not contain consumer
// project identity. (Example: calls that do not contain an API key). By
// default, API methods do not allow unregistered calls, and each method
// call must be identified by a consumer project identity. Use this rule
// to allow/disallow unregistered calls. Example of an API that wants to
// allow unregistered calls for entire service. usage: rules: -
// selector: "*" allow_unregistered_calls: true Example of a method that
// wants to allow unregistered calls. usage: rules: - selector:
// "google.example.library.v1.LibraryService.CreateBook"
// allow_unregistered_calls: true
type UsageRule struct {
// AllowUnregisteredCalls: If true, the selected method allows
// unregistered calls, e.g. calls that don't identify any user or
// application.
AllowUnregisteredCalls bool `json:"allowUnregisteredCalls,omitempty"`
// Selector: Selects the methods to which this rule applies. Use '*' to
// indicate all methods in all APIs. Refer to selector for syntax
// details.
Selector string `json:"selector,omitempty"`
// SkipServiceControl: If true, the selected method should skip service
// control and the control plane features, such as quota and billing,
// will not be available. This flag is used by Google Cloud Endpoints to
// bypass checks for internal methods, such as service health check
// methods.
SkipServiceControl bool `json:"skipServiceControl,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "AllowUnregisteredCalls") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AllowUnregisteredCalls")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *UsageRule) MarshalJSON() ([]byte, error) {
type NoMethod UsageRule
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
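// Editor's note: the UsageRule doc comment's second example (allowing
// unregistered calls on a single method), sketched as a Go literal.
func exampleUsageRule() *UsageRule {
	return &UsageRule{
		Selector:               "google.example.library.v1.LibraryService.CreateBook",
		AllowUnregisteredCalls: true,
	}
}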
// method id "servicemanagement.operations.get":
type OperationsGetCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets the latest state of a long-running operation. Clients can
// use this method to poll the operation result at intervals as
// recommended by the API service.
//
// - name: The name of the operation resource.
func (r *OperationsService) Get(name string) *OperationsGetCall {
c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *OperationsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.operations.get" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.",
// "flatPath": "v1/operations/{operationsId}",
// "httpMethod": "GET",
// "id": "servicemanagement.operations.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The name of the operation resource.",
// "location": "path",
// "pattern": "^operations/.*$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/{+name}",
// "response": {
// "$ref": "Operation"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
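// Editor's note: a minimal sketch of polling a long-running operation by
// name, assuming svc was built with NewService; the operation name is a
// placeholder matching the "^operations/.*$" pattern.
func exampleOperationsGet(ctx context.Context, svc *APIService) (*Operation, error) {
	return svc.Operations.Get("operations/abc123").Context(ctx).Do()
}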
// method id "servicemanagement.operations.list":
type OperationsListCall struct {
s *APIService
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists service operations that match the specified filter in the
// request.
func (r *OperationsService) List() *OperationsListCall {
c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// Filter sets the optional parameter "filter": A string for filtering
// Operations. The following filter fields are supported: * serviceName:
// Required. Only `=` operator is allowed. * startTime: The time this
// job was started, in ISO 8601 format. Allowed operators are `>=`, `>`,
// `<=`, and `<`. * status: Can be `done`, `in_progress`, or `failed`.
// Allowed operators are `=`, and `!=`. Filter expression supports
// conjunction (AND) and disjunction (OR) logical operators. However,
// the serviceName restriction must be at the top-level and can only be
// combined with other restrictions via the AND logical operator.
// Examples: * `serviceName={some-service}.googleapis.com` *
// `serviceName={some-service}.googleapis.com AND
// startTime>="2017-02-01" * `serviceName={some-service}.googleapis.com
// AND status=done` * `serviceName={some-service}.googleapis.com AND
// (status=done OR startTime>="2017-02-01")`
func (c *OperationsListCall) Filter(filter string) *OperationsListCall {
c.urlParams_.Set("filter", filter)
return c
}
// Name sets the optional parameter "name": Not used.
func (c *OperationsListCall) Name(name string) *OperationsListCall {
c.urlParams_.Set("name", name)
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of operations to return. If unspecified, defaults to 50. The maximum
// value is 100.
func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The standard list
// page token.
func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *OperationsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/operations")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.operations.list" call.
// Exactly one of *ListOperationsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListOperationsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListOperationsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists service operations that match the specified filter in the request.",
// "flatPath": "v1/operations",
// "httpMethod": "GET",
// "id": "servicemanagement.operations.list",
// "parameterOrder": [],
// "parameters": {
// "filter": {
// "description": "A string for filtering Operations. The following filter fields are supported: * serviceName: Required. Only `=` operator is allowed. * startTime: The time this job was started, in ISO 8601 format. Allowed operators are `\u003e=`, `\u003e`, `\u003c=`, and `\u003c`. * status: Can be `done`, `in_progress`, or `failed`. Allowed operators are `=`, and `!=`. Filter expression supports conjunction (AND) and disjunction (OR) logical operators. However, the serviceName restriction must be at the top-level and can only be combined with other restrictions via the AND logical operator. Examples: * `serviceName={some-service}.googleapis.com` * `serviceName={some-service}.googleapis.com AND startTime\u003e=\"2017-02-01\"` * `serviceName={some-service}.googleapis.com AND status=done` * `serviceName={some-service}.googleapis.com AND (status=done OR startTime\u003e=\"2017-02-01\")`",
// "location": "query",
// "type": "string"
// },
// "name": {
// "description": "Not used.",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "The maximum number of operations to return. If unspecified, defaults to 50. The maximum value is 100.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "The standard list page token.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v1/operations",
// "response": {
// "$ref": "ListOperationsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *OperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
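// Example (illustrative sketch, not part of the generated surface):
// iterate over all matching operations page by page. "svc" is assumed to
// be a configured *APIService, "ctx" a context.Context, and Filter the
// setter for the "filter" query parameter documented above.
//
//	err := svc.Operations.List().
//		Filter(`serviceName=example.googleapis.com AND status=done`).
//		Pages(ctx, func(page *ListOperationsResponse) error {
//			for _, op := range page.Operations {
//				fmt.Println(op.Name) // Operation's Name field, per this API's schema
//			}
//			return nil
//		})
//	if err != nil {
//		// handle the error from Do or from the callback
//	}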
// method id "servicemanagement.services.create":
type ServicesCreateCall struct {
s *APIService
managedservice *ManagedService
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new managed service. A managed service is
// immutable, and is subject to mandatory 30-day data retention. You
// cannot move a service or recreate it within 30 days after deletion.
// One producer project can own no more than 500 services. For security
// and reliability purposes, a production service should be hosted in a
// dedicated producer project. The method returns a long-running
// Operation.
func (r *ServicesService) Create(managedservice *ManagedService) *ServicesCreateCall {
c := &ServicesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.managedservice = managedservice
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesCreateCall) Fields(s ...googleapi.Field) *ServicesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesCreateCall) Context(ctx context.Context) *ServicesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedservice)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.create" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ServicesCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new managed service. A managed service is immutable, and is subject to mandatory 30-day data retention. You cannot move a service or recreate it within 30 days after deletion. One producer project can own no more than 500 services. For security and reliability purposes, a production service should be hosted in a dedicated producer project. Operation",
// "flatPath": "v1/services",
// "httpMethod": "POST",
// "id": "servicemanagement.services.create",
// "parameterOrder": [],
// "parameters": {},
// "path": "v1/services",
// "request": {
// "$ref": "ManagedService"
// },
// "response": {
// "$ref": "Operation"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
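// Example (illustrative sketch, not part of the generated surface):
// create a managed service. "svc" is an assumed *APIService, and the
// ManagedService field names are taken from this API's schema.
//
//	op, err := svc.Services.Create(&ManagedService{
//		ServiceName:       "example.googleapis.com",
//		ProducerProjectId: "my-producer-project",
//	}).Do()
//	if err != nil {
//		// handle error
//	}
//	_ = op // creation is tracked by the returned long-running Operation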
// method id "servicemanagement.services.delete":
type ServicesDeleteCall struct {
s *APIService
serviceName string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a managed service. This method will change the
// service to the `Soft-Delete` state for 30 days. Within this period,
// service producers may call UndeleteService to restore the service.
// After 30 days, the service will be permanently deleted. The method
// returns a long-running Operation.
//
// - serviceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements. For
// example: `example.googleapis.com`.
func (r *ServicesService) Delete(serviceName string) *ServicesDeleteCall {
c := &ServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesDeleteCall) Fields(s ...googleapi.Field) *ServicesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesDeleteCall) Context(ctx context.Context) *ServicesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.delete" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a managed service. This method will change the service to the `Soft-Delete` state for 30 days. Within this period, service producers may call UndeleteService to restore the service. After 30 days, the service will be permanently deleted. Operation",
// "flatPath": "v1/services/{serviceName}",
// "httpMethod": "DELETE",
// "id": "servicemanagement.services.delete",
// "parameterOrder": [
// "serviceName"
// ],
// "parameters": {
// "serviceName": {
// "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}",
// "response": {
// "$ref": "Operation"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
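// Example (illustrative sketch, not part of the generated surface):
// soft-delete a service; it can be restored with Undelete within 30
// days. "svc" is an assumed *APIService.
//
//	op, err := svc.Services.Delete("example.googleapis.com").Do()
//	if err != nil {
//		// handle error
//	}
//	_ = op // the delete is tracked by the returned long-running Operation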
// method id "servicemanagement.services.generateConfigReport":
type ServicesGenerateConfigReportCall struct {
s *APIService
generateconfigreportrequest *GenerateConfigReportRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// GenerateConfigReport: Generates and returns a report (errors,
// warnings and changes from existing configurations) associated with
// GenerateConfigReportRequest.new_value. If
// GenerateConfigReportRequest.old_value is specified, the
// GenerateConfigReportResponse will contain a single ChangeReport based
// on the comparison between GenerateConfigReportRequest.new_value and
// GenerateConfigReportRequest.old_value. If
// GenerateConfigReportRequest.old_value is not specified, this method
// will compare GenerateConfigReportRequest.new_value with the last
// pushed service configuration.
func (r *ServicesService) GenerateConfigReport(generateconfigreportrequest *GenerateConfigReportRequest) *ServicesGenerateConfigReportCall {
c := &ServicesGenerateConfigReportCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.generateconfigreportrequest = generateconfigreportrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesGenerateConfigReportCall) Fields(s ...googleapi.Field) *ServicesGenerateConfigReportCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesGenerateConfigReportCall) Context(ctx context.Context) *ServicesGenerateConfigReportCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesGenerateConfigReportCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesGenerateConfigReportCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateconfigreportrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services:generateConfigReport")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.generateConfigReport" call.
// Exactly one of *GenerateConfigReportResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *GenerateConfigReportResponse.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesGenerateConfigReportCall) Do(opts ...googleapi.CallOption) (*GenerateConfigReportResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &GenerateConfigReportResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Generates and returns a report (errors, warnings and changes from existing configurations) associated with GenerateConfigReportRequest.new_value If GenerateConfigReportRequest.old_value is specified, GenerateConfigReportRequest will contain a single ChangeReport based on the comparison between GenerateConfigReportRequest.new_value and GenerateConfigReportRequest.old_value. If GenerateConfigReportRequest.old_value is not specified, this method will compare GenerateConfigReportRequest.new_value with the last pushed service configuration.",
// "flatPath": "v1/services:generateConfigReport",
// "httpMethod": "POST",
// "id": "servicemanagement.services.generateConfigReport",
// "parameterOrder": [],
// "parameters": {},
// "path": "v1/services:generateConfigReport",
// "request": {
// "$ref": "GenerateConfigReportRequest"
// },
// "response": {
// "$ref": "GenerateConfigReportResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
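// Example (illustrative sketch, not part of the generated surface):
// request a config report. "req" is an assumed, already-populated
// *GenerateConfigReportRequest and "svc" an assumed *APIService.
//
//	report, err := svc.Services.GenerateConfigReport(req).Do()
//	if err != nil {
//		// handle error
//	}
//	fmt.Println(report.ServiceName) // ServiceName field assumed from the response schema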
// method id "servicemanagement.services.get":
type ServicesGetCall struct {
s *APIService
serviceName string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a managed service. Authentication is required unless the
// service is public.
//
// - serviceName: The name of the service. See the `ServiceManager`
// overview for naming requirements. For example:
// `example.googleapis.com`.
func (r *ServicesService) Get(serviceName string) *ServicesGetCall {
c := &ServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesGetCall) Fields(s ...googleapi.Field) *ServicesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ServicesGetCall) IfNoneMatch(entityTag string) *ServicesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesGetCall) Context(ctx context.Context) *ServicesGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.get" call.
// Exactly one of *ManagedService or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *ManagedService.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesGetCall) Do(opts ...googleapi.CallOption) (*ManagedService, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ManagedService{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a managed service. Authentication is required unless the service is public.",
// "flatPath": "v1/services/{serviceName}",
// "httpMethod": "GET",
// "id": "servicemanagement.services.get",
// "parameterOrder": [
// "serviceName"
// ],
// "parameters": {
// "serviceName": {
// "description": "Required. The name of the service. See the `ServiceManager` overview for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}",
// "response": {
// "$ref": "ManagedService"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
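// Example (illustrative sketch, not part of the generated surface):
// fetch a service and use an ETag to avoid re-downloading an unchanged
// resource, checking the error with googleapi.IsNotModified as the Do
// documentation describes. "svc" and "etag" are assumed.
//
//	ms, err := svc.Services.Get("example.googleapis.com").
//		IfNoneMatch(etag).
//		Do()
//	if googleapi.IsNotModified(err) {
//		// the cached copy is still current
//	} else if err != nil {
//		// handle other errors
//	}
//	_ = ms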
// method id "servicemanagement.services.getConfig":
type ServicesGetConfigCall struct {
s *APIService
serviceName string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// GetConfig: Gets a service configuration (version) for a managed
// service.
//
// - serviceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements. For
// example: `example.googleapis.com`.
func (r *ServicesService) GetConfig(serviceName string) *ServicesGetConfigCall {
c := &ServicesGetConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
return c
}
// ConfigId sets the optional parameter "configId": Required. The id of
// the service configuration resource. This field must be specified for
// the server to return all fields, including `SourceInfo`.
func (c *ServicesGetConfigCall) ConfigId(configId string) *ServicesGetConfigCall {
c.urlParams_.Set("configId", configId)
return c
}
// View sets the optional parameter "view": Specifies which parts of the
// Service Config should be returned in the response.
//
// Possible values:
// "BASIC" - Server response includes all fields except SourceInfo.
// "FULL" - Server response includes all fields including SourceInfo.
// SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile'
// and are only available for configs created using the
// SubmitConfigSource method.
func (c *ServicesGetConfigCall) View(view string) *ServicesGetConfigCall {
c.urlParams_.Set("view", view)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesGetConfigCall) Fields(s ...googleapi.Field) *ServicesGetConfigCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ServicesGetConfigCall) IfNoneMatch(entityTag string) *ServicesGetConfigCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesGetConfigCall) Context(ctx context.Context) *ServicesGetConfigCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesGetConfigCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesGetConfigCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}/config")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.getConfig" call.
// Exactly one of *Service or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Service.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesGetConfigCall) Do(opts ...googleapi.CallOption) (*Service, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Service{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a service configuration (version) for a managed service.",
// "flatPath": "v1/services/{serviceName}/config",
// "httpMethod": "GET",
// "id": "servicemanagement.services.getConfig",
// "parameterOrder": [
// "serviceName"
// ],
// "parameters": {
// "configId": {
// "description": "Required. The id of the service configuration resource. This field must be specified for the server to return all fields, including `SourceInfo`.",
// "location": "query",
// "type": "string"
// },
// "serviceName": {
// "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "view": {
// "description": "Specifies which parts of the Service Config should be returned in the response.",
// "enum": [
// "BASIC",
// "FULL"
// ],
// "enumDescriptions": [
// "Server response includes all fields except SourceInfo.",
// "Server response includes all fields including SourceInfo. SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile' and are only available for configs created using the SubmitConfigSource method."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}/config",
// "response": {
// "$ref": "Service"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
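// Example (illustrative sketch, not part of the generated surface):
// fetch a specific configuration version with source info included.
// "svc" is an assumed *APIService and the config id is a placeholder.
//
//	cfg, err := svc.Services.GetConfig("example.googleapis.com").
//		ConfigId("2021-09-17r0").
//		View("FULL").
//		Do()
//	if err != nil {
//		// handle error
//	}
//	_ = cfg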
// method id "servicemanagement.services.getIamPolicy":
type ServicesGetIamPolicyCall struct {
s *APIService
resource string
getiampolicyrequest *GetIamPolicyRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// GetIamPolicy: Gets the access control policy for a resource. Returns
// an empty policy if the resource exists and does not have a policy
// set.
//
// - resource: REQUIRED: The resource for which the policy is being
// requested. See the operation documentation for the appropriate
// value for this field.
func (r *ServicesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ServicesGetIamPolicyCall {
c := &ServicesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.getiampolicyrequest = getiampolicyrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesGetIamPolicyCall) Fields(s ...googleapi.Field) *ServicesGetIamPolicyCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesGetIamPolicyCall) Context(ctx context.Context) *ServicesGetIamPolicyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesGetIamPolicyCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.getIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Policy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.",
// "flatPath": "v1/services/{servicesId}:getIamPolicy",
// "httpMethod": "POST",
// "id": "servicemanagement.services.getIamPolicy",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^services/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/{+resource}:getIamPolicy",
// "request": {
// "$ref": "GetIamPolicyRequest"
// },
// "response": {
// "$ref": "Policy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
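// Example (illustrative sketch, not part of the generated surface):
// read the IAM policy for a service; the resource must use the
// "services/{name}" form required by this method's path pattern.
//
//	policy, err := svc.Services.GetIamPolicy(
//		"services/example.googleapis.com",
//		&GetIamPolicyRequest{},
//	).Do()
//	if err != nil {
//		// handle error
//	}
//	_ = policy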
// method id "servicemanagement.services.list":
type ServicesListCall struct {
s *APIService
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists managed services. Returns all public services. For
// authenticated users, also returns all services the calling user has
// "servicemanagement.services.get" permission for.
func (r *ServicesService) List() *ServicesListCall {
c := &ServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// ConsumerId sets the optional parameter "consumerId": Include services
// consumed by the specified consumer. The Google Service Management
// implementation accepts the following forms: `project:<project_id>`.
func (c *ServicesListCall) ConsumerId(consumerId string) *ServicesListCall {
c.urlParams_.Set("consumerId", consumerId)
return c
}
// PageSize sets the optional parameter "pageSize": The max number of
// items to include in the response list. Page size is 50 if not
// specified. Maximum value is 100.
func (c *ServicesListCall) PageSize(pageSize int64) *ServicesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": Token identifying
// which result to start with; returned by a previous list call.
func (c *ServicesListCall) PageToken(pageToken string) *ServicesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// ProducerProjectId sets the optional parameter "producerProjectId":
// Include services produced by the specified project.
func (c *ServicesListCall) ProducerProjectId(producerProjectId string) *ServicesListCall {
c.urlParams_.Set("producerProjectId", producerProjectId)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesListCall) Fields(s ...googleapi.Field) *ServicesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ServicesListCall) IfNoneMatch(entityTag string) *ServicesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesListCall) Context(ctx context.Context) *ServicesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.list" call.
// Exactly one of *ListServicesResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListServicesResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListServicesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists managed services. Returns all public services. For authenticated users, also returns all services the calling user has \"servicemanagement.services.get\" permission for.",
// "flatPath": "v1/services",
// "httpMethod": "GET",
// "id": "servicemanagement.services.list",
// "parameterOrder": [],
// "parameters": {
// "consumerId": {
// "description": "Include services consumed by the specified consumer. The Google Service Management implementation accepts the following forms: - project:",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "The max number of items to include in the response list. Page size is 50 if not specified. Maximum value is 100.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "Token identifying which result to start with; returned by a previous list call.",
// "location": "query",
// "type": "string"
// },
// "producerProjectId": {
// "description": "Include services produced by the specified project.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v1/services",
// "response": {
// "$ref": "ListServicesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ServicesListCall) Pages(ctx context.Context, f func(*ListServicesResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
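// Example (illustrative sketch, not part of the generated surface):
// page through services produced by one project. "svc" and "ctx" are
// assumed, and ListServicesResponse.Services is assumed from the
// response schema.
//
//	err := svc.Services.List().
//		ProducerProjectId("my-producer-project").
//		PageSize(100).
//		Pages(ctx, func(page *ListServicesResponse) error {
//			for _, ms := range page.Services {
//				fmt.Println(ms.ServiceName)
//			}
//			return nil
//		})
//	if err != nil {
//		// handle the error from Do or from the callback
//	}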
// method id "servicemanagement.services.setIamPolicy":
type ServicesSetIamPolicyCall struct {
s *APIService
resource string
setiampolicyrequest *SetIamPolicyRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// SetIamPolicy: Sets the access control policy on the specified
// resource. Replaces any existing policy. Can return `NOT_FOUND`,
// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
//
// - resource: REQUIRED: The resource for which the policy is being
// specified. See the operation documentation for the appropriate
// value for this field.
func (r *ServicesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ServicesSetIamPolicyCall {
c := &ServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.setiampolicyrequest = setiampolicyrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *ServicesSetIamPolicyCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesSetIamPolicyCall) Context(ctx context.Context) *ServicesSetIamPolicyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesSetIamPolicyCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.setIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Policy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.",
// "flatPath": "v1/services/{servicesId}:setIamPolicy",
// "httpMethod": "POST",
// "id": "servicemanagement.services.setIamPolicy",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^services/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/{+resource}:setIamPolicy",
// "request": {
// "$ref": "SetIamPolicyRequest"
// },
// "response": {
// "$ref": "Policy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
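// Example (illustrative sketch, not part of the generated surface):
// write back a modified policy, typically one read via GetIamPolicy
// first so its etag guards the update. "updated" is an assumed *Policy.
//
//	policy, err := svc.Services.SetIamPolicy(
//		"services/example.googleapis.com",
//		&SetIamPolicyRequest{Policy: updated},
//	).Do()
//	if err != nil {
//		// handle error
//	}
//	_ = policy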
// method id "servicemanagement.services.testIamPermissions":
type ServicesTestIamPermissionsCall struct {
s *APIService
resource string
testiampermissionsrequest *TestIamPermissionsRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// TestIamPermissions: Returns permissions that a caller has on the
// specified resource. If the resource does not exist, this will return
// an empty set of permissions, not a `NOT_FOUND` error. Note: This
// operation is designed to be used for building permission-aware UIs
// and command-line tools, not for authorization checking. This
// operation may "fail open" without warning.
//
// - resource: REQUIRED: The resource for which the policy detail is
// being requested. See the operation documentation for the
// appropriate value for this field.
func (r *ServicesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ServicesTestIamPermissionsCall {
c := &ServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.testiampermissionsrequest = testiampermissionsrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ServicesTestIamPermissionsCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesTestIamPermissionsCall) Context(ctx context.Context) *ServicesTestIamPermissionsCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesTestIamPermissionsCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.testIamPermissions" call.
// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &TestIamPermissionsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.",
// "flatPath": "v1/services/{servicesId}:testIamPermissions",
// "httpMethod": "POST",
// "id": "servicemanagement.services.testIamPermissions",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^services/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/{+resource}:testIamPermissions",
// "request": {
// "$ref": "TestIamPermissionsRequest"
// },
// "response": {
// "$ref": "TestIamPermissionsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
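// Example (illustrative sketch, not part of the generated surface):
// check which of a set of permissions the caller holds. As noted above,
// this is for permission-aware UIs and tooling, not authorization.
//
//	resp, err := svc.Services.TestIamPermissions(
//		"services/example.googleapis.com",
//		&TestIamPermissionsRequest{
//			Permissions: []string{"servicemanagement.services.get"},
//		},
//	).Do()
//	if err != nil {
//		// handle error
//	}
//	_ = resp.Permissions // the subset of permissions that was granted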
// method id "servicemanagement.services.undelete":
type ServicesUndeleteCall struct {
s *APIService
serviceName string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Undelete: Revives a previously deleted managed service. The method
// restores the service using the configuration at the time the service
// was deleted. The target service must exist and must have been deleted
// within the last 30 days. The method returns a long-running
// Operation.
//
// - serviceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements. For
// example: `example.googleapis.com`.
func (r *ServicesService) Undelete(serviceName string) *ServicesUndeleteCall {
c := &ServicesUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesUndeleteCall) Fields(s ...googleapi.Field) *ServicesUndeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesUndeleteCall) Context(ctx context.Context) *ServicesUndeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesUndeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesUndeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}:undelete")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.undelete" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ServicesUndeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Revives a previously deleted managed service. The method restores the service using the configuration at the time the service was deleted. The target service must exist and must have been deleted within the last 30 days. Operation",
// "flatPath": "v1/services/{serviceName}:undelete",
// "httpMethod": "POST",
// "id": "servicemanagement.services.undelete",
// "parameterOrder": [
// "serviceName"
// ],
// "parameters": {
// "serviceName": {
// "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}:undelete",
// "response": {
// "$ref": "Operation"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
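// Example (illustrative sketch, not part of the generated surface):
// restore a service deleted within the last 30 days. "svc" is an
// assumed *APIService.
//
//	op, err := svc.Services.Undelete("example.googleapis.com").Do()
//	if err != nil {
//		// handle error
//	}
//	_ = op // restoration is tracked by the returned long-running Operation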
// method id "servicemanagement.services.configs.create":
type ServicesConfigsCreateCall struct {
s *APIService
serviceName string
service *Service
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new service configuration (version) for a managed
// service. This method only stores the service configuration. To roll
// out the service configuration to backend systems please call
// CreateServiceRollout. Only the 100 most recent service configurations
// and ones referenced by existing rollouts are kept for each service.
// The rest will be deleted eventually.
//
// - serviceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements. For
// example: `example.googleapis.com`.
func (r *ServicesConfigsService) Create(serviceName string, service *Service) *ServicesConfigsCreateCall {
c := &ServicesConfigsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
c.service = service
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesConfigsCreateCall) Fields(s ...googleapi.Field) *ServicesConfigsCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesConfigsCreateCall) Context(ctx context.Context) *ServicesConfigsCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesConfigsCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesConfigsCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.service)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}/configs")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.configs.create" call.
// Exactly one of *Service or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Service.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesConfigsCreateCall) Do(opts ...googleapi.CallOption) (*Service, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Service{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new service configuration (version) for a managed service. This method only stores the service configuration. To roll out the service configuration to backend systems please call CreateServiceRollout. Only the 100 most recent service configurations and ones referenced by existing rollouts are kept for each service. The rest will be deleted eventually.",
// "flatPath": "v1/services/{serviceName}/configs",
// "httpMethod": "POST",
// "id": "servicemanagement.services.configs.create",
// "parameterOrder": [
// "serviceName"
// ],
// "parameters": {
// "serviceName": {
// "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}/configs",
// "request": {
// "$ref": "Service"
// },
// "response": {
// "$ref": "Service"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
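// Example (illustrative sketch, not part of the generated surface):
// store a new configuration version; it only takes effect once rolled
// out via CreateServiceRollout. "cfg" is an assumed, already-populated
// *Service, and the Services.Configs accessor path is assumed from this
// package's usual layout.
//
//	created, err := svc.Services.Configs.Create("example.googleapis.com", cfg).Do()
//	if err != nil {
//		// handle error
//	}
//	fmt.Println(created.Id) // Id field assumed from the Service schema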
// method id "servicemanagement.services.configs.get":
type ServicesConfigsGetCall struct {
s *APIService
serviceName string
configId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a service configuration (version) for a managed service.
//
// - configId: The id of the service configuration resource. This field
// must be specified for the server to return all fields, including
// `SourceInfo`.
// - serviceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements. For
// example: `example.googleapis.com`.
func (r *ServicesConfigsService) Get(serviceName string, configId string) *ServicesConfigsGetCall {
c := &ServicesConfigsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
c.configId = configId
return c
}
// View sets the optional parameter "view": Specifies which parts of the
// Service Config should be returned in the response.
//
// Possible values:
// "BASIC" - Server response includes all fields except SourceInfo.
// "FULL" - Server response includes all fields including SourceInfo.
// SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile'
// and are only available for configs created using the
// SubmitConfigSource method.
func (c *ServicesConfigsGetCall) View(view string) *ServicesConfigsGetCall {
c.urlParams_.Set("view", view)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesConfigsGetCall) Fields(s ...googleapi.Field) *ServicesConfigsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ServicesConfigsGetCall) IfNoneMatch(entityTag string) *ServicesConfigsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesConfigsGetCall) Context(ctx context.Context) *ServicesConfigsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesConfigsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesConfigsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}/configs/{configId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
"configId": c.configId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.configs.get" call.
// Exactly one of *Service or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Service.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesConfigsGetCall) Do(opts ...googleapi.CallOption) (*Service, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Service{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a service configuration (version) for a managed service.",
// "flatPath": "v1/services/{serviceName}/configs/{configId}",
// "httpMethod": "GET",
// "id": "servicemanagement.services.configs.get",
// "parameterOrder": [
// "serviceName",
// "configId"
// ],
// "parameters": {
// "configId": {
// "description": "Required. The id of the service configuration resource. This field must be specified for the server to return all fields, including `SourceInfo`.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "serviceName": {
// "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "view": {
// "description": "Specifies which parts of the Service Config should be returned in the response.",
// "enum": [
// "BASIC",
// "FULL"
// ],
// "enumDescriptions": [
// "Server response includes all fields except SourceInfo.",
// "Server response includes all fields including SourceInfo. SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile' and are only available for configs created using the SubmitConfigSource method."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}/configs/{configId}",
// "response": {
// "$ref": "Service"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
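// Editor's sketch: fetching one config version with the FULL view so
// SourceInfo is included; the service name and config id are placeholders.
func exampleGetServiceConfig(ctx context.Context, r *ServicesConfigsService) (*Service, error) {
	return r.Get("example.googleapis.com", "2021-09-17r0").
		View("FULL").
		Context(ctx).
		Do()
}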
// method id "servicemanagement.services.configs.list":
type ServicesConfigsListCall struct {
s *APIService
serviceName string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the history of the service configuration for a managed
// service, from the newest to the oldest.
//
// - serviceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements. For
// example: `example.googleapis.com`.
func (r *ServicesConfigsService) List(serviceName string) *ServicesConfigsListCall {
c := &ServicesConfigsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
return c
}
// PageSize sets the optional parameter "pageSize": The max number of
// items to include in the response list. Page size is 50 if not
// specified. Maximum value is 100.
func (c *ServicesConfigsListCall) PageSize(pageSize int64) *ServicesConfigsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The token of the
// page to retrieve.
func (c *ServicesConfigsListCall) PageToken(pageToken string) *ServicesConfigsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesConfigsListCall) Fields(s ...googleapi.Field) *ServicesConfigsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ServicesConfigsListCall) IfNoneMatch(entityTag string) *ServicesConfigsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesConfigsListCall) Context(ctx context.Context) *ServicesConfigsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesConfigsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesConfigsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}/configs")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.configs.list" call.
// Exactly one of *ListServiceConfigsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *ListServiceConfigsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesConfigsListCall) Do(opts ...googleapi.CallOption) (*ListServiceConfigsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListServiceConfigsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the history of the service configuration for a managed service, from the newest to the oldest.",
// "flatPath": "v1/services/{serviceName}/configs",
// "httpMethod": "GET",
// "id": "servicemanagement.services.configs.list",
// "parameterOrder": [
// "serviceName"
// ],
// "parameters": {
// "pageSize": {
// "description": "The max number of items to include in the response list. Page size is 50 if not specified. Maximum value is 100.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "The token of the page to retrieve.",
// "location": "query",
// "type": "string"
// },
// "serviceName": {
// "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}/configs",
// "response": {
// "$ref": "ListServiceConfigsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ServicesConfigsListCall) Pages(ctx context.Context, f func(*ListServiceConfigsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
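// Editor's sketch: page-wise iteration with Pages. The callback fires once
// per response page until NextPageToken is empty; ServiceConfigs and Id are
// the response/struct fields assumed from the v1 schema.
func exampleListServiceConfigs(ctx context.Context, r *ServicesConfigsService) error {
	call := r.List("example.googleapis.com").PageSize(50)
	return call.Pages(ctx, func(page *ListServiceConfigsResponse) error {
		for _, cfg := range page.ServiceConfigs {
			fmt.Println(cfg.Id)
		}
		return nil
	})
}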
// method id "servicemanagement.services.configs.submit":
type ServicesConfigsSubmitCall struct {
s *APIService
serviceName string
submitconfigsourcerequest *SubmitConfigSourceRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Submit: Creates a new service configuration (version) for a managed
// service based on user-supplied configuration source files (for
// example: OpenAPI Specification). This method stores the source
// configurations as well as the generated service configuration. To
// roll out the service configuration to other services, please call
// CreateServiceRollout. Only the 100 most recent configuration sources
// and ones referenced by existing service configurations are kept for
// each service. The rest will be deleted eventually. Returns an
// Operation.
//
// - serviceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements. For
// example: `example.googleapis.com`.
func (r *ServicesConfigsService) Submit(serviceName string, submitconfigsourcerequest *SubmitConfigSourceRequest) *ServicesConfigsSubmitCall {
c := &ServicesConfigsSubmitCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
c.submitconfigsourcerequest = submitconfigsourcerequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesConfigsSubmitCall) Fields(s ...googleapi.Field) *ServicesConfigsSubmitCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesConfigsSubmitCall) Context(ctx context.Context) *ServicesConfigsSubmitCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesConfigsSubmitCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesConfigsSubmitCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.submitconfigsourcerequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}/configs:submit")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.configs.submit" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ServicesConfigsSubmitCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new service configuration (version) for a managed service based on user-supplied configuration source files (for example: OpenAPI Specification). This method stores the source configurations as well as the generated service configuration. To rollout the service configuration to other services, please call CreateServiceRollout. Only the 100 most recent configuration sources and ones referenced by existing service configurtions are kept for each service. The rest will be deleted eventually. Operation",
// "flatPath": "v1/services/{serviceName}/configs:submit",
// "httpMethod": "POST",
// "id": "servicemanagement.services.configs.submit",
// "parameterOrder": [
// "serviceName"
// ],
// "parameters": {
// "serviceName": {
// "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}/configs:submit",
// "request": {
// "$ref": "SubmitConfigSourceRequest"
// },
// "response": {
// "$ref": "Operation"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
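// Editor's sketch: submitting a config source. FileContents must be
// base64-encoded per the ConfigFile schema; the path, type, and contents
// below are placeholders. Submit returns a long-running Operation.
func exampleSubmitConfigSource(r *ServicesConfigsService) (*Operation, error) {
	req := &SubmitConfigSourceRequest{
		ConfigSource: &ConfigSource{
			Files: []*ConfigFile{{
				FilePath:     "openapi.yaml",
				FileType:     "OPEN_API_YAML",
				FileContents: "c3dhZ2dlcjogJzIuMCcK", // base64 of "swagger: '2.0'\n"
			}},
		},
	}
	return r.Submit("example.googleapis.com", req).Do()
}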
// method id "servicemanagement.services.consumers.getIamPolicy":
type ServicesConsumersGetIamPolicyCall struct {
s *APIService
resource string
getiampolicyrequest *GetIamPolicyRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// GetIamPolicy: Gets the access control policy for a resource. Returns
// an empty policy if the resource exists and does not have a policy
// set.
//
// - resource: REQUIRED: The resource for which the policy is being
// requested. See the operation documentation for the appropriate
// value for this field.
func (r *ServicesConsumersService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ServicesConsumersGetIamPolicyCall {
c := &ServicesConsumersGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.getiampolicyrequest = getiampolicyrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesConsumersGetIamPolicyCall) Fields(s ...googleapi.Field) *ServicesConsumersGetIamPolicyCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesConsumersGetIamPolicyCall) Context(ctx context.Context) *ServicesConsumersGetIamPolicyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesConsumersGetIamPolicyCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesConsumersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.consumers.getIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesConsumersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Policy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.",
// "flatPath": "v1/services/{servicesId}/consumers/{consumersId}:getIamPolicy",
// "httpMethod": "POST",
// "id": "servicemanagement.services.consumers.getIamPolicy",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^services/[^/]+/consumers/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/{+resource}:getIamPolicy",
// "request": {
// "$ref": "GetIamPolicyRequest"
// },
// "response": {
// "$ref": "Policy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
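// Editor's sketch: reading the IAM policy on a per-consumer resource; the
// resource path is a placeholder matching ^services/[^/]+/consumers/[^/]+$.
func exampleGetConsumerIamPolicy(r *ServicesConsumersService) (*Policy, error) {
	return r.GetIamPolicy(
		"services/example.googleapis.com/consumers/project:my-project",
		&GetIamPolicyRequest{},
	).Do()
}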
// method id "servicemanagement.services.consumers.setIamPolicy":
type ServicesConsumersSetIamPolicyCall struct {
s *APIService
resource string
setiampolicyrequest *SetIamPolicyRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// SetIamPolicy: Sets the access control policy on the specified
// resource. Replaces any existing policy. Can return `NOT_FOUND`,
// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
//
// - resource: REQUIRED: The resource for which the policy is being
// specified. See the operation documentation for the appropriate
// value for this field.
func (r *ServicesConsumersService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ServicesConsumersSetIamPolicyCall {
c := &ServicesConsumersSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.setiampolicyrequest = setiampolicyrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesConsumersSetIamPolicyCall) Fields(s ...googleapi.Field) *ServicesConsumersSetIamPolicyCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesConsumersSetIamPolicyCall) Context(ctx context.Context) *ServicesConsumersSetIamPolicyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesConsumersSetIamPolicyCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesConsumersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.consumers.setIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesConsumersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Policy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.",
// "flatPath": "v1/services/{servicesId}/consumers/{consumersId}:setIamPolicy",
// "httpMethod": "POST",
// "id": "servicemanagement.services.consumers.setIamPolicy",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^services/[^/]+/consumers/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/{+resource}:setIamPolicy",
// "request": {
// "$ref": "SetIamPolicyRequest"
// },
// "response": {
// "$ref": "Policy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
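// Editor's sketch: read-modify-write with SetIamPolicy. Passing back the
// policy (and its etag) from a prior GetIamPolicy guards against concurrent
// edits; the role and member values are placeholders.
func exampleSetConsumerIamPolicy(r *ServicesConsumersService, current *Policy) (*Policy, error) {
	current.Bindings = append(current.Bindings, &Binding{
		Role:    "roles/servicemanagement.serviceConsumer", // placeholder role
		Members: []string{"user:alice@example.com"},
	})
	return r.SetIamPolicy(
		"services/example.googleapis.com/consumers/project:my-project",
		&SetIamPolicyRequest{Policy: current},
	).Do()
}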
// method id "servicemanagement.services.consumers.testIamPermissions":
type ServicesConsumersTestIamPermissionsCall struct {
s *APIService
resource string
testiampermissionsrequest *TestIamPermissionsRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// TestIamPermissions: Returns permissions that a caller has on the
// specified resource. If the resource does not exist, this will return
// an empty set of permissions, not a `NOT_FOUND` error. Note: This
// operation is designed to be used for building permission-aware UIs
// and command-line tools, not for authorization checking. This
// operation may "fail open" without warning.
//
// - resource: REQUIRED: The resource for which the policy detail is
// being requested. See the operation documentation for the
// appropriate value for this field.
func (r *ServicesConsumersService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ServicesConsumersTestIamPermissionsCall {
c := &ServicesConsumersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.testiampermissionsrequest = testiampermissionsrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesConsumersTestIamPermissionsCall) Fields(s ...googleapi.Field) *ServicesConsumersTestIamPermissionsCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesConsumersTestIamPermissionsCall) Context(ctx context.Context) *ServicesConsumersTestIamPermissionsCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesConsumersTestIamPermissionsCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesConsumersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.consumers.testIamPermissions" call.
// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesConsumersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &TestIamPermissionsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.",
// "flatPath": "v1/services/{servicesId}/consumers/{consumersId}:testIamPermissions",
// "httpMethod": "POST",
// "id": "servicemanagement.services.consumers.testIamPermissions",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^services/[^/]+/consumers/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/{+resource}:testIamPermissions",
// "request": {
// "$ref": "TestIamPermissionsRequest"
// },
// "response": {
// "$ref": "TestIamPermissionsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
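// Editor's sketch: probing caller permissions. The response echoes back the
// subset of the supplied permissions the caller actually holds; the
// permission string is a placeholder.
func exampleTestConsumerIamPermissions(r *ServicesConsumersService) ([]string, error) {
	resp, err := r.TestIamPermissions(
		"services/example.googleapis.com/consumers/project:my-project",
		&TestIamPermissionsRequest{Permissions: []string{"servicemanagement.services.get"}},
	).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}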
// method id "servicemanagement.services.rollouts.create":
type ServicesRolloutsCreateCall struct {
s *APIService
serviceName string
rollout *Rollout
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new service configuration rollout. Based on the
// rollout, Google Service Management will roll out the service
// configurations to different backend services. For example, the
// logging configuration will be pushed to Google Cloud Logging. Please
// note that any previous pending and running Rollouts and associated
// Operations will be automatically cancelled so that the latest Rollout
// will not be blocked by previous Rollouts. Only the 100 most recent
// (in any state) and the last 10 successful (if not already part of the
// set of 100 most recent) rollouts are kept for each service. The rest
// will be deleted eventually. Returns an Operation.
//
// - serviceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements. For
// example: `example.googleapis.com`.
func (r *ServicesRolloutsService) Create(serviceName string, rollout *Rollout) *ServicesRolloutsCreateCall {
c := &ServicesRolloutsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
c.rollout = rollout
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesRolloutsCreateCall) Fields(s ...googleapi.Field) *ServicesRolloutsCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesRolloutsCreateCall) Context(ctx context.Context) *ServicesRolloutsCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesRolloutsCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesRolloutsCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollout)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}/rollouts")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.rollouts.create" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ServicesRolloutsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new service configuration rollout. Based on rollout, the Google Service Management will roll out the service configurations to different backend services. For example, the logging configuration will be pushed to Google Cloud Logging. Please note that any previous pending and running Rollouts and associated Operations will be automatically cancelled so that the latest Rollout will not be blocked by previous Rollouts. Only the 100 most recent (in any state) and the last 10 successful (if not already part of the set of 100 most recent) rollouts are kept for each service. The rest will be deleted eventually. Operation",
// "flatPath": "v1/services/{serviceName}/rollouts",
// "httpMethod": "POST",
// "id": "servicemanagement.services.rollouts.create",
// "parameterOrder": [
// "serviceName"
// ],
// "parameters": {
// "serviceName": {
// "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}/rollouts",
// "request": {
// "$ref": "Rollout"
// },
// "response": {
// "$ref": "Operation"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/service.management"
// ]
// }
}
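// Editor's sketch: a percentage-based rollout sending all traffic to one
// config version; the config id is a placeholder. Create returns an
// Operation that tracks the rollout's progress.
func exampleCreateRollout(r *ServicesRolloutsService) (*Operation, error) {
	rollout := &Rollout{
		TrafficPercentStrategy: &TrafficPercentStrategy{
			Percentages: map[string]float64{"2021-09-17r0": 100.0},
		},
	}
	return r.Create("example.googleapis.com", rollout).Do()
}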
// method id "servicemanagement.services.rollouts.get":
type ServicesRolloutsGetCall struct {
s *APIService
serviceName string
rolloutId string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a service configuration rollout.
//
// - rolloutId: The id of the rollout resource.
// - serviceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements. For
// example: `example.googleapis.com`.
func (r *ServicesRolloutsService) Get(serviceName string, rolloutId string) *ServicesRolloutsGetCall {
c := &ServicesRolloutsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
c.rolloutId = rolloutId
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesRolloutsGetCall) Fields(s ...googleapi.Field) *ServicesRolloutsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ServicesRolloutsGetCall) IfNoneMatch(entityTag string) *ServicesRolloutsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesRolloutsGetCall) Context(ctx context.Context) *ServicesRolloutsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesRolloutsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesRolloutsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}/rollouts/{rolloutId}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
"rolloutId": c.rolloutId,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.rollouts.get" call.
// Exactly one of *Rollout or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Rollout.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesRolloutsGetCall) Do(opts ...googleapi.CallOption) (*Rollout, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Rollout{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a service configuration rollout.",
// "flatPath": "v1/services/{serviceName}/rollouts/{rolloutId}",
// "httpMethod": "GET",
// "id": "servicemanagement.services.rollouts.get",
// "parameterOrder": [
// "serviceName",
// "rolloutId"
// ],
// "parameters": {
// "rolloutId": {
// "description": "Required. The id of the rollout resource.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "serviceName": {
// "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}/rollouts/{rolloutId}",
// "response": {
// "$ref": "Rollout"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
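// Editor's sketch: reading a single rollout back by id; both identifiers are
// placeholders.
func exampleGetRollout(ctx context.Context, r *ServicesRolloutsService) (*Rollout, error) {
	return r.Get("example.googleapis.com", "2021-09-17r0").
		Context(ctx).
		Do()
}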
// method id "servicemanagement.services.rollouts.list":
type ServicesRolloutsListCall struct {
s *APIService
serviceName string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the history of the service configuration rollouts for a
// managed service, from the newest to the oldest.
//
// - serviceName: The name of the service. See the overview
// (/service-management/overview) for naming requirements. For
// example: `example.googleapis.com`.
func (r *ServicesRolloutsService) List(serviceName string) *ServicesRolloutsListCall {
c := &ServicesRolloutsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.serviceName = serviceName
return c
}
// Filter sets the optional parameter "filter": Required. Use `filter`
// to return a subset of rollouts. The following filters are supported:
// -- To limit the results to only those in status
// (google.api.servicemanagement.v1.RolloutStatus) 'SUCCESS', use
// filter='status=SUCCESS' -- To limit the results to those in status
// (google.api.servicemanagement.v1.RolloutStatus) 'CANCELLED' or
// 'FAILED', use filter='status=CANCELLED OR status=FAILED'
func (c *ServicesRolloutsListCall) Filter(filter string) *ServicesRolloutsListCall {
c.urlParams_.Set("filter", filter)
return c
}
// PageSize sets the optional parameter "pageSize": The max number of
// items to include in the response list. Page size is 50 if not
// specified. Maximum value is 100.
func (c *ServicesRolloutsListCall) PageSize(pageSize int64) *ServicesRolloutsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The token of the
// page to retrieve.
func (c *ServicesRolloutsListCall) PageToken(pageToken string) *ServicesRolloutsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesRolloutsListCall) Fields(s ...googleapi.Field) *ServicesRolloutsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ServicesRolloutsListCall) IfNoneMatch(entityTag string) *ServicesRolloutsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesRolloutsListCall) Context(ctx context.Context) *ServicesRolloutsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesRolloutsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesRolloutsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210917")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/services/{serviceName}/rollouts")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"serviceName": c.serviceName,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "servicemanagement.services.rollouts.list" call.
// Exactly one of *ListServiceRolloutsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *ListServiceRolloutsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesRolloutsListCall) Do(opts ...googleapi.CallOption) (*ListServiceRolloutsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListServiceRolloutsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the history of the service configuration rollouts for a managed service, from the newest to the oldest.",
// "flatPath": "v1/services/{serviceName}/rollouts",
// "httpMethod": "GET",
// "id": "servicemanagement.services.rollouts.list",
// "parameterOrder": [
// "serviceName"
// ],
// "parameters": {
// "filter": {
// "description": "Required. Use `filter` to return subset of rollouts. The following filters are supported: -- To limit the results to only those in [status](google.api.servicemanagement.v1.RolloutStatus) 'SUCCESS', use filter='status=SUCCESS' -- To limit the results to those in [status](google.api.servicemanagement.v1.RolloutStatus) 'CANCELLED' or 'FAILED', use filter='status=CANCELLED OR status=FAILED'",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "The max number of items to include in the response list. Page size is 50 if not specified. Maximum value is 100.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "The token of the page to retrieve.",
// "location": "query",
// "type": "string"
// },
// "serviceName": {
// "description": "Required. The name of the service. See the [overview](/service-management/overview) for naming requirements. For example: `example.googleapis.com`.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "v1/services/{serviceName}/rollouts",
// "response": {
// "$ref": "ListServiceRolloutsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/cloud-platform.read-only",
// "https://www.googleapis.com/auth/service.management",
// "https://www.googleapis.com/auth/service.management.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ServicesRolloutsListCall) Pages(ctx context.Context, f func(*ListServiceRolloutsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
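// Editor's sketch: combining the required status filter with Pages to walk
// every successful rollout, newest first; Rollouts, RolloutId, and Status
// are the response/struct fields assumed from the v1 schema.
func exampleListSuccessfulRollouts(ctx context.Context, r *ServicesRolloutsService) error {
	call := r.List("example.googleapis.com").Filter("status=SUCCESS")
	return call.Pages(ctx, func(page *ListServiceRolloutsResponse) error {
		for _, ro := range page.Rollouts {
			fmt.Println(ro.RolloutId, ro.Status)
		}
		return nil
	})
}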
| NewServicesConfigsService |
authServices.js | hackApp.service( 'AuthService', [ '$q', 'ApiService', '$injector', function ( $q, ApiService, $injector ) {
    var me = this;
    me.token = undefined;
    var isLogged = function ( ) {
        return angular.isDefined( me.token );
    };
    var getToken = function ( ) {
        return me.token;
    };
    var setToken = function ( uToken ) {
        return me.token = uToken;
    };
    var logout = function ( ) {
        // Clear the service-level token; the original assigned to a bare
        // `token`, which left me.token set and isLogged() returning true.
        me.token = undefined;
    };
    var login = function ( userApiToken ) {
        var loginDeferred = $q.defer();
        ApiService.login( { token : userApiToken } ).$promise
            .then( function ( response ) {
                me.token = userApiToken;
                var cookies = $injector.get( '$cookies' );
                cookies.put( 'u-token', userApiToken );
                loginDeferred.resolve( true );
            } )
            .catch( function ( error ) {
                me.token = undefined;
                loginDeferred.reject( 'Token invalid.' );
            } );
        return loginDeferred.promise;
    };
    return {
        isLogged : isLogged,
        getToken : getToken,
        setToken : setToken,
        logout : logout,
        login : login
    };
} ] );
arithmetic_operators.py | """
Task
The provided code stub reads two integers, a and b, from STDIN. Add code to print three lines where:
The first line contains the sum of the two numbers.
The second line contains the difference of the two numbers (first - second).
The third line contains the product of the two numbers.
Example
For a = 3 and b = 5, print the following:
8
-2
15
Input Format
The first line contains the first integer, a.
The second line contains the second integer, b.
Constraints
1 <= a <= 10**10
1 <= b <= 10**10
Output Format
Print the three lines as explained above.
Sample Input 0
3
2
Sample Output 0
5
1
6
Explanation 0
3 + 2 = 5, 3 - 2 = 1 and 3 * 2 = 6.
"""
def check(n):
    # The input is valid when 1 <= n <= 10**10 (the stated constraints).
    return 1 <= n <= 10**10
if __name__ == '__main__':
    a = int(input())
    b = int(input())
    if check(a) and check(b):
        print(a + b)
        print(a - b)
        print(a * b)