Dataset columns (all string-valued):
  file_name: length 3 to 137 characters
  prefix:    length 0 to 918k characters
  suffix:    length 0 to 962k characters
  middle:    length 0 to 812k characters
network_interface_ip_configuration.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .sub_resource import SubResource


class NetworkInterfaceIPConfiguration(SubResource):
    """IPConfiguration in a network interface.

    :param id: Resource Identifier.
    :type id: str
    :param load_balancer_backend_address_pools: The reference of
     LoadBalancerBackendAddressPool resource.
    :type load_balancer_backend_address_pools:
     list[~azure.mgmt.network.v2015_06_15.models.BackendAddressPool]
    :param load_balancer_inbound_nat_rules: A list of references of
     LoadBalancerInboundNatRules.
    :type load_balancer_inbound_nat_rules:
     list[~azure.mgmt.network.v2015_06_15.models.InboundNatRule]
    :param private_ip_address:
    :type private_ip_address: str
    :param private_ip_allocation_method: Defines how a private IP address is
     assigned. Possible values are: 'Static' and 'Dynamic'. Possible values
     include: 'Static', 'Dynamic'
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2015_06_15.models.IPAllocationMethod
    :param subnet:
    :type subnet: ~azure.mgmt.network.v2015_06_15.models.Subnet
    :param public_ip_address:
    :type public_ip_address:
     ~azure.mgmt.network.v2015_06_15.models.PublicIPAddress
    :param provisioning_state:
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[BackendAddressPool]'},
        'load_balancer_inbound_nat_rules': {'key': 'properties.loadBalancerInboundNatRules', 'type': '[InboundNatRule]'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def
(self, **kwargs):
        super(NetworkInterfaceIPConfiguration, self).__init__(**kwargs)
        self.load_balancer_backend_address_pools = kwargs.get('load_balancer_backend_address_pools', None)
        self.load_balancer_inbound_nat_rules = kwargs.get('load_balancer_inbound_nat_rules', None)
        self.private_ip_address = kwargs.get('private_ip_address', None)
        self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method', None)
        self.subnet = kwargs.get('subnet', None)
        self.public_ip_address = kwargs.get('public_ip_address', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
__init__
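Each row pairs a file_name with three strings; concatenating prefix, middle, and suffix in that order reproduces the original file, with middle being the masked span to be filled in (in the sample above, the prefix ends at "def", the middle is "__init__", and the suffix continues with the argument list). A minimal sketch of that reconstruction, assuming rows are plain dicts keyed by the column names; the reconstruct helper and the shortened example_row are illustrative stand-ins, not part of the dataset:

# Minimal sketch (assumption): rebuild the original source from one row.
def reconstruct(row: dict) -> str:
    # prefix + middle + suffix is the complete file content.
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "file_name": "network_interface_ip_configuration.py",
    "prefix": "...    def ",          # hypothetical truncation of the real prefix
    "middle": "__init__",             # the masked span
    "suffix": "(self, **kwargs): ...",  # hypothetical truncation of the real suffix
}
print(reconstruct(example_row))  # "...    def __init__(self, **kwargs): ..."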
table.tsx
import * as _ from 'lodash-es'; import * as PropTypes from 'prop-types'; import * as React from 'react'; import { connect } from 'react-redux'; import { withTranslation } from 'react-i18next'; import { getNodeRoles, getMachinePhase, nodeMemory, nodeCPU, nodeFS, nodePods, nodeMachine, nodeInstanceType, nodeZone, pvcUsed, snapshotSize, snapshotSource, ALL_NAMESPACES_KEY, getName, } from '@console/shared'; import * as UIActions from '../../actions/ui'; import { alertingRuleSource, alertingRuleStateOrder, alertSeverityOrder, alertSource, alertStateOrder, silenceFiringAlertsOrder, silenceStateOrder, } from '../../reducers/monitoring'; import { ingressValidHosts } from '../ingress'; import { convertToBaseValue, EmptyBox, StatusBox, WithScrollContainer } from '../utils'; import { CustomResourceDefinitionKind, getClusterOperatorStatus, getClusterOperatorVersion, getJobTypeAndCompletions, getLatestVersionForCRD, getTemplateInstanceStatus, K8sResourceKind, K8sResourceKindReference, NodeKind, planExternalName, PodKind, podPhase, podReadiness, podRestarts, serviceCatalogStatus, serviceClassDisplayName, MachineKind, VolumeSnapshotKind, } from '../../module/k8s'; import { IRowData, // eslint-disable-line no-unused-vars IExtraData, // eslint-disable-line no-unused-vars Table as PfTable, TableHeader, TableBody, TableGridBreakpoint, SortByDirection, OnSelect, } from '@patternfly/react-table'; import { CellMeasurerCache, CellMeasurer } from 'react-virtualized'; import { AutoSizer, VirtualTableBody, WindowScroller, } from '@patternfly/react-virtualized-extension'; import { tableFilters } from './table-filters'; import { PackageManifestKind } from '@console/operator-lifecycle-manager/src/types'; import { defaultChannelFor } from '@console/operator-lifecycle-manager/src/components'; const rowFiltersToFilterFuncs = (rowFilters) => { return (rowFilters || []) .filter((f) => f.type && _.isFunction(f.filter)) .reduce((acc, f) => ({ ...acc, [f.type]: f.filter }), {}); }; const getAllTableFilters = (rowFilters) => ({ ...tableFilters, ...rowFiltersToFilterFuncs(rowFilters), }); export const getFilteredRows = (_filters, rowFilters, objects) => { if (_.isEmpty(_filters)) { return objects; } const allTableFilters = getAllTableFilters(rowFilters); let filteredObjects = objects; _.each(_filters, (value, name) => { const filter = allTableFilters[name]; if (_.isFunction(filter)) { filteredObjects = _.filter(filteredObjects, (o) => filter(value, o)); } }); return filteredObjects; }; const filterPropType = (props, propName, componentName) => { if (!props) { return; } const allTableFilters = getAllTableFilters(props.rowFilters); for (const key of _.keys(props[propName])) { if (key in allTableFilters || key === 'loadTest') { continue; } return new Error( `Invalid prop '${propName}' in '${componentName}'. 
'${key}' is not a valid filter type!`, ); } }; const sorts = { alertingRuleSource, alertingRuleStateOrder, alertSeverityOrder, alertSource, alertStateOrder, crdLatestVersion: (crd: CustomResourceDefinitionKind): string => getLatestVersionForCRD(crd), daemonsetNumScheduled: (daemonset) => _.toInteger(_.get(daemonset, 'status.currentNumberScheduled')), dataSize: (resource) => _.size(_.get(resource, 'data')) + _.size(_.get(resource, 'binaryData')), ingressValidHosts, serviceCatalogStatus, jobCompletionsSucceeded: (job) => job?.status?.succeeded || 0, jobType: (job) => getJobTypeAndCompletions(job).type, nodeReadiness: (node: NodeKind) => { let readiness = _.get(node, 'status.conditions'); readiness = _.find(readiness, { type: 'Ready' }); return _.get(readiness, 'status'); }, numReplicas: (resource) => _.toInteger(_.get(resource, 'status.replicas')), planExternalName, namespaceCPU: (ns: K8sResourceKind): number => UIActions.getNamespaceMetric(ns, 'cpu'), namespaceMemory: (ns: K8sResourceKind): number => UIActions.getNamespaceMetric(ns, 'memory'), podCPU: (pod: PodKind): number => UIActions.getPodMetric(pod, 'cpu'), podMemory: (pod: PodKind): number => UIActions.getPodMetric(pod, 'memory'), podPhase, podReadiness: (pod: PodKind): number => podReadiness(pod).readyCount, podRestarts, pvStorage: (pv) => _.toInteger(convertToBaseValue(pv?.spec?.capacity?.storage)), pvcStorage: (pvc) => _.toInteger(convertToBaseValue(pvc?.status?.capacity?.storage)), serviceClassDisplayName, silenceFiringAlertsOrder, silenceStateOrder, string: (val) => JSON.stringify(val), number: (val) => _.toNumber(val), getClusterOperatorStatus, getClusterOperatorVersion, getTemplateInstanceStatus, nodeRoles: (node: NodeKind): string => { const roles = getNodeRoles(node); return roles.sort().join(', '); }, nodeMemory: (node: NodeKind): number => nodeMemory(node), nodeCPU: (node: NodeKind): number => nodeCPU(node), nodeFS: (node: NodeKind): number => nodeFS(node), nodeMachine: (node: NodeKind): string => nodeMachine(node), nodeInstanceType: (node: NodeKind): string => nodeInstanceType(node), nodeZone: (node: NodeKind): string => nodeZone(node), machinePhase: (machine: MachineKind): string => getMachinePhase(machine), nodePods: (node: NodeKind): number => nodePods(node), pvcUsed: (pvc: K8sResourceKind): number => pvcUsed(pvc), volumeSnapshotSize: (snapshot: VolumeSnapshotKind): number => snapshotSize(snapshot), volumeSnapshotSource: (snapshot: VolumeSnapshotKind): string => snapshotSource(snapshot), snapshotLastRestore: (snapshot: K8sResourceKind, { restores }) => restores[getName(snapshot)]?.status?.restoreTime, sortPackageManifestByDefaultChannelName: (packageManifest: PackageManifestKind): string => { const channel = defaultChannelFor(packageManifest); return channel?.currentCSVDesc?.displayName; }, }; const stateToProps = ( { UI }, { customData = {}, customSorts = {}, data = [], defaultSortField = 'metadata.name', defaultSortFunc = undefined, defaultSortOrder = SortByDirection.asc, filters = {}, loaded = false, reduxID = null, reduxIDs = null, staticFilters = [{}], rowFilters = [], isPinned, }: TableProps, ) => { const allFilters = staticFilters ? Object.assign({}, filters, ...staticFilters) : filters; const newData = getFilteredRows(allFilters, rowFilters, data); const listId = reduxIDs ? reduxIDs.join(',') : reduxID; // Only default to 'metadata.name' if no `defaultSortFunc` const currentSortField = UI.getIn( ['listSorts', listId, 'field'], defaultSortFunc ? 
undefined : defaultSortField, ); const currentSortFunc = UI.getIn(['listSorts', listId, 'func'], defaultSortFunc); const currentSortOrder = UI.getIn(['listSorts', listId, 'orderBy'], defaultSortOrder); if (loaded) { let sortBy: string | Function = 'metadata.name'; if (currentSortField) { sortBy = (resource) => sorts.string(_.get(resource, currentSortField, '')); } else if (currentSortFunc && customSorts[currentSortFunc]) { // Sort resources by a function in the 'customSorts' prop sortBy = customSorts[currentSortFunc]; } else if (currentSortFunc && sorts[currentSortFunc]) { // Sort resources by a function in the 'sorts' object sortBy = sorts[currentSortFunc]; } const getSortValue = (resource) => { const val = _.isFunction(sortBy) ? sortBy(resource, customData) : _.get(resource, sortBy as string); return val ?? ''; }; newData?.sort((a, b) => { const lang = navigator.languages[0] || navigator.language; // Use `localCompare` with `numeric: true` for a natural sort order (e.g., pv-1, pv-9, pv-10) const compareOpts = { numeric: true, ignorePunctuation: true }; const aValue = getSortValue(a); const bValue = getSortValue(b); const aPinned = isPinned?.(a); const bPinned = isPinned?.(b); if (aPinned !== bPinned) { return aPinned ? -1 : +1; } const result: number = Number.isFinite(aValue) && Number.isFinite(bValue) ? aValue - bValue : `${aValue}`.localeCompare(`${bValue}`, lang, compareOpts); if (result !== 0) { return currentSortOrder === SortByDirection.asc ? result : result * -1; } // Use name as a secondary sort for a stable sort. const aName = a?.metadata?.name || ''; const bName = b?.metadata?.name || ''; return aName.localeCompare(bName, lang, compareOpts); }); } return { currentSortField, currentSortFunc, currentSortOrder, data: newData, unfilteredData: data, listId, }; }; // Common table row/columns helper SFCs for implementing accessible data grid export const TableRow: React.SFC<TableRowProps> = ({ id, index, trKey, style, className, ...props }) => { return ( <tr {...props} data-id={id} data-index={index} data-test-rows="resource-row" data-key={trKey} style={style} className={className} role="row" /> ); }; TableRow.displayName = 'TableRow'; export type TableRowProps = { id: any; index: number; title?: string; trKey: string; style: object; className?: string; }; const BREAKPOINT_SM = 576; const BREAKPOINT_MD = 768; const BREAKPOINT_LG = 992; const BREAKPOINT_XL = 1200; const BREAKPOINT_XXL = 1400; const MAX_COL_XS = 2; const MAX_COL_SM = 4; const MAX_COL_MD = 4; const MAX_COL_LG = 6; const MAX_COL_XL = 8; const isColumnVisible = ( columnID: string, columns: Set<string> = new Set(), showNamespaceOverride, ) => { const showNamespace = columnID !== 'namespace' || UIActions.getActiveNamespace() === ALL_NAMESPACES_KEY || showNamespaceOverride; if (_.isEmpty(columns) && showNamespace) { return true; } if (!columns.has(columnID) || !showNamespace) { return false; } const widthInPixels = window.innerWidth; const columnIndex = [...columns].indexOf(columnID); if (widthInPixels < BREAKPOINT_SM) { return columnIndex < MAX_COL_XS; } if (widthInPixels < BREAKPOINT_MD) { return columnIndex < MAX_COL_SM; } if (widthInPixels < BREAKPOINT_LG) { return columnIndex < MAX_COL_MD; } if (widthInPixels < BREAKPOINT_XL) { return columnIndex < MAX_COL_LG; } if (widthInPixels < BREAKPOINT_XXL) { return columnIndex < MAX_COL_XL; } return true; }; export const TableData: React.SFC<TableDataProps> = ({ className, columnID, columns, showNamespaceOverride, ...props }) => { return isColumnVisible(columnID, columns, 
showNamespaceOverride) ? ( <td {...props} className={className} role="gridcell" /> ) : null; }; TableData.displayName = 'TableData'; export type TableDataProps = { className?: string; columnID?: string; columns?: Set<string>; id?: string; showNamespaceOverride?: boolean; }; const TableWrapper: React.SFC<TableWrapperProps> = ({ virtualize, ariaLabel, ariaRowCount, ...props }) => { return virtualize ? ( <div {...props} role="grid" aria-label={ariaLabel} aria-rowcount={ariaRowCount} /> ) : ( <React.Fragment {...props} /> ); }; export type TableWrapperProps = { virtualize: boolean; ariaLabel: string; ariaRowCount: number | undefined; }; const VirtualBody: React.SFC<VirtualBodyProps> = (props) => { const { customData, Row, height, isScrolling, onChildScroll, data, columns, scrollTop, width, } = props; const cellMeasurementCache = new CellMeasurerCache({ fixedWidth: true, minHeight: 44, keyMapper: (rowIndex) => _.get(props.data[rowIndex], 'metadata.uid', rowIndex), }); const rowRenderer = ({ index, isScrolling: scrolling, isVisible, key, style, parent }) => { const rowArgs = { obj: data[index], index, columns, isScrolling: scrolling, key, style, customData, }; const row = Row(rowArgs); // do not render non visible elements (this excludes overscan) if (!isVisible) { return null; } return ( <CellMeasurer cache={cellMeasurementCache} columnIndex={0} key={key} parent={parent} rowIndex={index} > {row} </CellMeasurer> ); }; return ( <VirtualTableBody autoHeight className="pf-c-table pf-m-compact pf-m-border-rows pf-c-virtualized pf-c-window-scroller" deferredMeasurementCache={cellMeasurementCache} rowHeight={cellMeasurementCache.rowHeight} height={height || 0} isScrolling={isScrolling} onScroll={onChildScroll} overscanRowCount={10} columns={columns} rows={data} rowCount={data.length} rowRenderer={rowRenderer} scrollTop={scrollTop} width={width} /> ); }; export type RowFunctionArgs<T = any, C = any> = { obj: T; index: number; columns: any[]; isScrolling: boolean; key: string; style: object; customData?: C; }; export type RowFunction<T = any, C = any> = (args: RowFunctionArgs<T, C>) => React.ReactElement; export type VirtualBodyProps = { customData?: any; Row: RowFunction; height: number; isScrolling: boolean; onChildScroll: (...args) => any; data: any[]; columns: any[]; scrollTop: number; width: number; expand: boolean; }; export type TableProps = { customData?: any; customSorts?: { [key: string]: any }; data?: any[]; defaultSortFunc?: string; defaultSortField?: string; defaultSortOrder?: SortByDirection; showNamespaceOverride?: boolean; filters?: { [key: string]: any }; Header: (...args) => any[]; loadError?: string | Object; Row?: RowFunction; Rows?: (...args) => any[]; 'aria-label': string; onSelect?: OnSelect; virtualize?: boolean; NoDataEmptyMsg?: React.ComponentType<{}>; EmptyMsg?: React.ComponentType<{}>; loaded?: boolean; reduxID?: string; reduxIDs?: string[]; rowFilters?: any[]; label?: string; columnManagementID?: string; isPinned?: (val: any) => boolean; staticFilters?: any[]; activeColumns?: Set<string>; kinds?: string[]; }; type TablePropsFromState = {}; type TablePropsFromDispatch = {}; type TableOptionProps = { UI: any; }; type ComponentProps = { data?: any[]; filters?: Object; selected?: any; match?: any; kindObj?: K8sResourceKindReference; }; const getActiveColumns = ( Header: any, componentProps: ComponentProps, activeColumns: Set<string>, columnManagementID: string, showNamespaceOverride: boolean, ) => { let columns = Header(componentProps); if (_.isEmpty(activeColumns)) { 
activeColumns = new Set( columns.map((col) => { if (col.id && !col.additional) { return col.id; } }), ); } if (columnManagementID) { columns = columns?.filter( (col) => isColumnVisible(col.id, activeColumns, showNamespaceOverride) || col.title === '', ); } else { columns = columns?.filter((col) => activeColumns.has(col.id) || col.title === ''); } const showNamespace = UIActions.getActiveNamespace() === ALL_NAMESPACES_KEY || showNamespaceOverride; if (!showNamespace) { columns = columns.filter((column) => column.id !== 'namespace'); } return columns; }; export const Table = connect< TablePropsFromState, TablePropsFromDispatch, TableProps, TableOptionProps >(stateToProps, { sortList: UIActions.sortList }, null, { areStatesEqual: ({ UI: next }, { UI: prev }) => next.get('listSorts') === prev.get('listSorts'), })( withTranslation()( class TableInner extends React.Component<TableInnerProps, TableInnerState> { static propTypes = { customData: PropTypes.any, data: PropTypes.array, showNamespaceOverride: PropTypes.bool, unfilteredData: PropTypes.array, NoDataEmptyMsg: PropTypes.func, EmptyMsg: PropTypes.func, expand: PropTypes.bool, fieldSelector: PropTypes.string, filters: filterPropType, Header: PropTypes.func.isRequired, Row: PropTypes.func, Rows: PropTypes.func, loaded: PropTypes.bool, loadError: PropTypes.oneOfType([PropTypes.object, PropTypes.string]), mock: PropTypes.bool, namespace: PropTypes.string, reduxID: PropTypes.string, reduxIDs: PropTypes.array, selector: PropTypes.object, staticFilters: PropTypes.array, virtualize: PropTypes.bool, currentSortField: PropTypes.string, currentSortFunc: PropTypes.string, currentSortOrder: PropTypes.any, defaultSortField: PropTypes.string, defaultSortFunc: PropTypes.string, label: PropTypes.string, listId: PropTypes.string, sortList: PropTypes.func, onSelect: PropTypes.func, scrollElement: PropTypes.oneOf([PropTypes.object, PropTypes.func]), columnManagementID: PropTypes.string, // for column management should use gvk for workloads }; _columnShift: number; constructor(props) { super(props); const componentProps: ComponentProps = _.pick(props, [ 'data', 'filters', 'selected', 'match', 'kindObj', ]); const columns = getActiveColumns( this.props.Header, componentProps, this.props.activeColumns, this.props.columnManagementID, this.props.showNamespaceOverride, ); const { currentSortField, currentSortFunc, currentSortOrder } = props; this._columnShift = props.onSelect ? 
1 : 0; //shift indexes by 1 if select provided this._applySort = this._applySort.bind(this); this._onSort = this._onSort.bind(this); this._handleResize = _.debounce(this._handleResize.bind(this), 100); let sortBy = {}; if (currentSortField && currentSortOrder) { const columnIndex = _.findIndex(columns, { sortField: currentSortField }); if (columnIndex > -1) { sortBy = { index: columnIndex + this._columnShift, direction: currentSortOrder }; } } else if (currentSortFunc && currentSortOrder) { const columnIndex = _.findIndex(columns, { sortFunc: currentSortFunc }); if (columnIndex > -1) { sortBy = { index: columnIndex + this._columnShift, direction: currentSortOrder }; } } this.state = { sortBy, columns }; props.i18n.on('languageChanged', () => { this.setState({ columns: props.Header(componentProps, props.t), }); }); } componentDidMount() { const componentProps: ComponentProps = _.pick(this.props, [ 'data', 'filters', 'selected', 'match', 'kindObj', ]); const columns = getActiveColumns( this.props.Header, componentProps, this.props.activeColumns, this.props.columnManagementID, this.props.showNamespaceOverride, ); const sp = new URLSearchParams(window.location.search); const columnIndex = _.findIndex(columns, { title: sp.get('sortBy') }); if (columnIndex > -1) { const sortOrder = sp.get('orderBy') || SortByDirection.asc; const column = columns[columnIndex]; this._applySort(column.sortField, column.sortFunc, sortOrder, column.title); this.setState({ sortBy: { index: columnIndex + this._columnShift, direction: sortOrder, }, }); } // re-render after resize window.addEventListener('resize', this._handleResize); } componentWillUnmount() { window.removeEventListener('resize', this._handleResize); } _handleResize() { this.forceUpdate(); } _applySort(sortField, sortFunc, direction, columnTitle) { const { sortList, listId, currentSortFunc } = this.props; const applySort = _.partial(sortList, listId); applySort(sortField, sortFunc || currentSortFunc, direction, columnTitle); } _onSort(event, index, direction) { event.preventDefault(); const componentProps: ComponentProps = _.pick(this.props, [ 'data', 'filters', 'selected', 'match', 'kindObj', ]); const columns = getActiveColumns( this.props.Header, componentProps, this.props.activeColumns, this.props.columnManagementID, this.props.showNamespaceOverride, ); const sortColumn = columns[index - this._columnShift]; this._applySort(sortColumn.sortField, sortColumn.sortFunc, direction, sortColumn.title); this.setState({ sortBy: { index, direction, }, }); } render() { const { columnManagementID, scrollElement, Rows, Row, expand, label, mock, onSelect, selectedResourcesForKind, 'aria-label': ariaLabel,
activeColumns, showNamespaceOverride, } = this.props; const { sortBy } = this.state; const componentProps: any = _.pick(this.props, [ 'data', 'filters', 'selected', 'match', 'kindObj', ]); const columns = getActiveColumns( Header, componentProps, activeColumns, columnManagementID, showNamespaceOverride, ); const ariaRowCount = componentProps.data && componentProps.data.length; const scrollNode = typeof scrollElement === 'function' ? scrollElement() : scrollElement; const renderVirtualizedTable = (scrollContainer) => ( <WindowScroller scrollElement={scrollContainer}> {({ height, isScrolling, registerChild, onChildScroll, scrollTop }) => ( <AutoSizer disableHeight> {({ width }) => ( <div ref={registerChild}> <VirtualBody Row={Row} customData={customData} height={height} isScrolling={isScrolling} onChildScroll={onChildScroll} data={componentProps.data} columns={columns} scrollTop={scrollTop} width={width} expand={expand} /> </div> )} </AutoSizer> )} </WindowScroller> ); const children = mock ? ( <EmptyBox label={label} /> ) : ( <TableWrapper virtualize={virtualize} ariaLabel={ariaLabel} ariaRowCount={ariaRowCount}> <PfTable cells={columns} rows={ virtualize ? [] : Rows({ componentProps, selectedResourcesForKind, customData }) } gridBreakPoint={gridBreakPoint} onSort={this._onSort} onSelect={onSelect} sortBy={sortBy} className="pf-m-compact pf-m-border-rows" role={virtualize ? 'presentation' : 'grid'} aria-label={virtualize ? null : ariaLabel} > <TableHeader /> {!virtualize && <TableBody />} </PfTable> {virtualize && (scrollNode ? ( renderVirtualizedTable(scrollNode) ) : ( <WithScrollContainer>{renderVirtualizedTable}</WithScrollContainer> ))} </TableWrapper> ); return ( <div className="co-m-table-grid co-m-table-grid--bordered"> {mock ? ( children ) : ( <StatusBox skeleton={<div className="loading-skeleton--table" />} {...this.props}> {children} </StatusBox> )} </div> ); } }, ), ); export type TableInnerProps = { 'aria-label': string; customData?: any; currentSortField?: string; currentSortFunc?: string; currentSortOrder?: any; data?: any[]; defaultSortField?: string; defaultSortFunc?: string; showNamespaceOverride?: boolean; activeColumns?: Set<string>; unfilteredData?: any[]; NoDataEmptyMsg?: React.ComponentType<{}>; EmptyMsg?: React.ComponentType<{}>; expand?: boolean; fieldSelector?: string; filters?: { [name: string]: any }; Header: (...args) => any[]; label?: string; listId?: string; loaded?: boolean; loadError?: string | Object; mock?: boolean; namespace?: string; reduxID?: string; reduxIDs?: string[]; Row?: RowFunction; Rows?: (...args) => any[]; selector?: Object; sortList?: (listId: string, field: string, func: any, orderBy: string, column: string) => any; selectedResourcesForKind?: string[]; onSelect?: ( event: React.FormEvent<HTMLInputElement>, isSelected: boolean, rowIndex: number, rowData: IRowData, extraData: IExtraData, ) => void; staticFilters?: any[]; rowFilters?: any[]; virtualize?: boolean; gridBreakPoint?: 'grid' | 'grid-md' | 'grid-lg' | 'grid-xl' | 'grid-2xl'; scrollElement?: HTMLElement | (() => HTMLElement); columnManagementID?: string; }; export type TableInnerState = { sortBy: object; columns?: any; };
virtualize = true, customData, gridBreakPoint = TableGridBreakpoint.none, Header,
ui.js
/* * Copyright (с) 2015-present, SoftIndex LLC. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ import toPromise from '../../common/toPromise'; import {parents, pick, escape, last, isDefined, pairs} from '../../common/utils'; import {findDOMNode} from 'react-dom'; import React from 'react'; import ThrottleError from '../../common/ThrottleError'; import classNames from 'classnames'; const GridUIMixin = { /** * Table content click event handler * * @param {Event} event */ _handleBodyClick(event) { const target = event.target; const refParent = parents(target, '[ref]')[0]; let element; if (target.classList.contains('dgrid-cell')) { element = event.target; } else { element = parents(target, 'td.dgrid-cell')[0]; } if ( element && !(refParent && refParent.hasAttribute('disabled')) ) { this._handleCellClick(event, element, (refParent || event.target).getAttribute('ref')); } }, /** * Cell click handler * * @param {Event} event Event object * @param {HTMLElement} element Cell DOM element * @param {string} ref Click handler name in the table configuration */ _handleCellClick(event, element, ref) { const colId = element.getAttribute('key'); const row = element.parentNode.getAttribute('key'); const columnConfig = this.props.cols[colId]; const recordId = this.state.recordsInfo[row].id; const record = this._getRecordWithChanges(row); // Trigger click handler on the table configuration if (ref) { columnConfig.onClickRefs[ref](event, recordId, record, this); } else if (columnConfig.onClick) { columnConfig.onClick(event, recordId, record, this); } // Open cell editor if (this.props.cols[colId].editor) { this._renderEditor(element, row, colId); } }, // TODO Deprecated _handleHeaderCellClick(col, event) { const target = event.target; const refParent = parents(target, '[ref]')[0]; const ref = (refParent || target).getAttribute('ref'); let handler; if (ref && col.onClickRefs) { handler = col.onClickRefs[ref]; if (handler) { return handler(event, this); } } if (col.onClick) { col.onClick(event, this); } }, /** * Fetch server data */ updateTable: async function () { this.setState({showLoader: true}); if (!this.props.model) { return; } const viewCount = this.getViewCount(); let obj; try { obj = await this._loadData({ limit: viewCount, offset: this.state.page * viewCount, sort: this._sortingToArray(), fields: this._getFieldsToRender(), extra: this._getAdditionalIds() }); } catch (e) { if (!(e instanceof ThrottleError)) { throw e; } return; } if (!this._isMounted) { return; } if (this.getViewCount() && !obj.hasOwnProperty('count')) {
// If required page is not included in the range of existing pages, // request existing in a moment page const page = this._checkPage(this.state.page, this.getViewCount(), obj.count); if (page !== this.state.page) { this.state.page = page; this.updateTable(); return; } const data = this._dataArrayToObject(obj.records); const extra = this._dataArrayToObject(obj.extraRecords || []); const recordIds = Object.keys(data.records).concat(Object.keys(extra.records)); await toPromise(this.setState.bind(this), true)({ data: Object.assign({}, data.records, extra.records), mainIds: Object.keys(data.records), count: obj.count, totals: obj.totals, recordsInfo: Object.assign({}, extra.info, data.info), errors: pick(this.state.errors, recordIds), changes: pick(this.state.changes, recordIds), statuses: pick(this.state.statuses, recordIds) }); this._renderBody(); this.setState({showLoader: false}); }, _getHeaderCellHTML(columnName) { const cellHtml = typeof columnName === 'function' ? columnName(this) : columnName; if (cellHtml === undefined) { return ''; } return cellHtml; }, _escapeRecord(columnId, record) { let field; let type; let i; const escapedRecord = {}; const column = this.props.cols[columnId]; const needEscaping = !column.hasOwnProperty('escape') || column.escape; const fields = column.render.slice(0, -1); for (i = 0; i < fields.length; i++) { field = fields[i]; type = typeof record[field]; if (needEscaping) { if (type === 'string') { escapedRecord[field] = escape(record[field]); continue; } if (type === 'object' && record[field] && !this.state.colsWithEscapeErrors[columnId]) { this.state.colsWithEscapeErrors[columnId] = true; console.error( `UIKernel.Grid warning: ` + `You send record with fields of Object type in escaped column "${columnId}". ` + `To use Objects, set column config "escape" to false, ` + `and escape "${columnId}" field in render function by yourself` ); } } escapedRecord[field] = record[field]; } return escapedRecord; }, /** * Get table cell HTML * * @param {number} columnId Column ID * @param {Object} record Table record (initial record + changes) * @param {boolean} selected "Selected" row status * @param {Object} initialRecord Initial record * @returns {string} Table cell HTML * @private */ _getCellHTML(columnId, record, selected, initialRecord) { const render = last(this.props.cols[columnId].render); const cellHtml = render( this._escapeRecord(columnId, record), selected, this._escapeRecord(columnId, initialRecord), this ); return `${isDefined(cellHtml) ? 
cellHtml : ''}`; }, /** * Get table row HTML * * @param {number} rowId Row ID * @param {string} className <TR> class attribute * @returns {string} Table row HTML * @private */ _getRowHTML(rowId, className) { let colId; const record = this._getRecordWithChanges(rowId); const initialRecord = this.state.data[rowId] || null; const selected = this.isSelected(this.state.recordsInfo[rowId].id); const gridRowClass = classNames( className, this._getRowStatusNames(rowId).join(' '), {'dgrid__row_selected': selected} ); let html = `<tr key="${rowId}" class="${gridRowClass}">`; for (colId of Object.keys(this.props.cols)) { if (this._isViewColumn(colId)) { const gridCellClass = classNames(this._getColumnClass(colId), { 'dgrid-cell': true, 'dgrid-changed': this._isChanged(rowId, this._getBindParam(colId)), 'dgrid-error': this._hasError(rowId, this._getBindParam(colId)), 'dgrid-warning': this._hasWarning(rowId, this._getBindParam(colId)) }); html += ` <td key="${colId}" class="${gridCellClass}"> ${this._getCellHTML(colId, record, selected, initialRecord)} </td>`; } } return `${html}</tr>`; }, /** * Redraw table content totally * * @private */ _renderBody() { if (!this.state.data) { return; } let i; let row; let htmlExtra = ''; let htmlBody = ''; const sorted = pairs(this.state.recordsInfo).sort((a, b) => a[1].index - b[1].index); for (i = 0; i < sorted.length; i++) { row = sorted[i][0]; if (this._isMainRow(row)) { htmlBody += this._getRowHTML(row); } else if (this._isChanged(row) || this._getRowStatusNames(row).length) { htmlExtra += this._getRowHTML(row, 'others'); } } this.tBody.innerHTML = htmlExtra + htmlBody; }, /** * Display model changes * * @param {string} row Row ID * @param {string} param Model parameter * @private */ _renderBinds(row, param) { // If parameter does not affect on the redraw, do nothing if (!this._isFieldAffectsRender(param)) { return; } const selected = this.isSelected(this.state.recordsInfo[row].id); // Update column dependencies for (const column of this._getDependentColumns(param)) { if (this._isViewColumn(column) && !this._isEditorVisible(row, column)) { this._renderCell(row, column, selected); } } }, _removeTR(rowId) { findDOMNode(this.body).deleteRow(rowId); }, _renderTotals(isScrollable) { let totalsDisplayed = false; let i; let className; let totalsRowHTML = ''; const header = this._formHeader(); // If data for result line display exists, form it if (this.state.totals) { for (i of Object.keys(this.props.cols)) { if (!this._isViewColumn(i)) { continue; } className = this.props.cols[i].className; if (className) { totalsRowHTML += `<td class="${className}">`; } else { totalsRowHTML += '<td>'; } if (this.state.totals.hasOwnProperty(i)) { totalsRowHTML += this._getCellHTML(i, this.state.totals, false, this.state.totals); totalsDisplayed = true; } totalsRowHTML += '</td>'; } } if (!totalsDisplayed) { return null; } if (isScrollable) { return ( <table cellSpacing="0" className="dgrid-totals"> <colgroup>{header.colGroup}</colgroup> <tr dangerouslySetInnerHTML={{__html: totalsRowHTML}}/> </table> ); } return ( <tfoot className="dgrid-totals"> <tr dangerouslySetInnerHTML={{__html: totalsRowHTML}}/> </tfoot> ); }, _renderCell(rowId, column, isSelected) { const cell = findDOMNode(this.body).querySelector(`tr[key="${rowId}"] td[key=${column}]`); const initialRecord = this.state.data[rowId] || null; const cellHTML = this._getCellHTML(column, this._getRecordWithChanges(rowId), isSelected, initialRecord); try { cell.innerHTML = cellHTML; } catch (e) { // Sometimes it is possible a 
situation when rerendering of the cell is called in the middle of performing of an // event in that cell which may cause an error like "DOMException: The node to be removed is no longer a child // of this node", so just ignore it } cell.classList.remove('dgrid-changed', 'dgrid-error', 'dgrid-warning'); const cellClassList = []; if (this._isChanged(rowId, this._getBindParam(column))) { cellClassList.push('dgrid-changed'); } if (this._hasError(rowId, this._getBindParam(column))) { cellClassList.push('dgrid-error'); } if (this._hasWarning(rowId, this._getBindParam(column))) { cellClassList.push('dgrid-warning'); } cell.classList.add(...cellClassList); }, async _updateRow(row) { if (!this.state.data) { return; } if (this.state.data[row]) { const selected = this.isSelected(this.state.recordsInfo[row].id); const viewColumns = Object.keys(this.props.cols).filter(this._isViewColumn); for (const viewColumn of viewColumns) { if (!this._isEditorVisible(row, viewColumn)) { this._renderCell(row, viewColumn, selected); } } } else { await this.updateTable(); // TODO Check is it need } } }; export default GridUIMixin;
throw new Error('Incorrect response from GridModel. "response.count" not defined'); }
abt_did.py
import base64 import json import logging from datetime import datetime from datetime import timezone from forge_sdk import utils from forge_sdk.did import lib from forge_sdk.did.lib import HASH_MAP from forge_sdk.did.lib import KEY_MAP from forge_sdk.did.lib import ROLE_MAP from forge_sdk.mcrypto.hasher import Hasher from forge_sdk.mcrypto.signer import Signer logger = logging.getLogger('abt-did') class AbtDid: PREFIX = 'did:abt:' MIN_30 = 1800 def __init__(self, role_type='account', key_type='ed25519', hash_type='sha3', **kwargs): """ Initialize an AbtDid Instance. Args: role_type(string): role type of this did instance, default is 'account' key_type(string): key type of this did instance, default is 'ed25519' hash_type: hash type of this did instance, default is 'sha3' Kwargs: encode(bool): if the calculated did address should be encoded. Defaults to True. form(stirng): can be either 'short' or 'long'. Decides if the did address should include the prefix or not. Defaults to be 'long'. """ self.role_type = role_type assert (role_type in ROLE_MAP.keys()) self.key_type = key_type assert (key_type in KEY_MAP.keys()) self.signer = Signer(key_type) self.hash_type = hash_type assert (hash_type in HASH_MAP.keys()) self.hasher = Hasher(name=hash_type, rd=kwargs.get('rd')) self.encode = kwargs.get('encode', True) self.form = kwargs.get('form', 'long') def new(self): """ Generate a new did address Returns: string """ sk, pk = self.signer.keypair() return sk, pk, self.sk_to_did(sk) @staticmethod def parse_type_from_did(did, rd=None): """ Parse the correct DID type used in provided did address Args: did(string): did address Returns: :obj:`AbtDid` Examples: >>> did_type = AbtDid.parse_type_from_did('did:abt:z1jqq6DaT76Q9aTRZ4ndNjh9AthotPBvyEP') >>> did_type.hash_type 'sha3' >>> did_type.role_type 'account' >>> did_type.key_type 'ed25519' """ try: did = did.lstrip(AbtDid.PREFIX) decoded = utils.multibase_b58decode(did) type_bytes = decoded[0:2] return AbtDid._bytes_to_type(type_bytes, rd=rd) except Exception as e: logger.error('Fail to parse type from given did {}'.format(did)) logger.error(e, exc_info=True) def sk_to_did(self, sk): """ Use provided secret key to create a DID Args: sk(bytes): secret key Returns: string Examples: >>> import base64 >>> sk = base64.b16decode('3E0F9A313300226D51E33D5D98A126E86396956122E97E32D31CEE2277380B83FF47B3022FA503EAA1E9FA4B20FA8B16694EA56096F3A2E9109714062B3486D9') >>> AbtDid().sk_to_did(sk) 'did:abt:z1ioGHFYiEemfLa3hQjk4JTwWTQPu1g2YxP' """ pk = self.signer.sk_to_pk(sk) return self.pk_to_did(pk) def pk_to_did(self, pk): """ Use provided public key to create a DID Args: pk(bytes): public key Returns: string Examples: >>> import base64 >>> pk = base64.b16decode('A5AB55816BB81D2526D5CAE3CE3082F4F2FAF9D658D8938EC085E8BADAFF5B9F') >>> AbtDid().pk_to_did(pk) 'did:abt:z1XEw92uJKkTqyTuMnFFQ1BrgkGinfz72dF' """ pk_hash = self.hasher.hash(pk) return self.hash_to_did(pk_hash) def hash_to_did(self, hash): if not isinstance(hash, bytes): hash = hash.encode() try: hash = base64.b16decode(hash, True) except Exception: hash = hash type_bytes = self._type_to_bytes() extended_hash = type_bytes + hash[0:20] did_bytes = extended_hash + self.hasher.hash(extended_hash)[0:4] encoded_did = utils.multibase_b58encode(did_bytes) if not self.encode: return did_bytes elif self.form == 'long': return AbtDid.PREFIX + encoded_did else: return encoded_did @staticmethod def is_match_pk(did, pk, rd=None): """ check if the provided did is calculated from provided public key Args: did(string): did 
address pk(bytes): public key Returns: bool Examples: >>> import base64 >>> pk = base64.b16decode('A5AB55816BB81D2526D5CAE3CE3082F4F2FAF9D658D8938EC085E8BADAFF5B9F') >>> did_address ='did:abt:z1XEw92uJKkTqyTuMnFFQ1BrgkGinfz72dF' >>> AbtDid.is_match_pk(did_address, pk) True """ if did.startswith(AbtDid.PREFIX): did = did.lstrip(AbtDid.PREFIX) try: decoded = utils.multibase_b58decode(did) type_bytes = decoded[0:2] did_type = AbtDid._bytes_to_type(type_bytes, rd=rd) if did == did_type.pk_to_did(pk).lstrip(AbtDid.PREFIX): return True return False except Exception as e: logger.error('Fail to match pk {}'.format(pk)) logger.error(e, exc_info=True) return False @staticmethod def is_valid(did, rd=None): """ Check is the provided DID address valid Args: did(string): DID address Returns: bool Examples: >>> AbtDid.is_valid('did:abt:z1XEw92uJKkTqyTuMnFFQ1BrgkGinfz72dF') True >>> AbtDid.is_valid('did:abt:z1XEw92uJKkTqyTuMnFFQ1Brgk72dF') False """ did = did.lstrip('did:abt:')
pk_hash = decoded[2:22] actual_check_sum = decoded[22:26] did_type = AbtDid._bytes_to_type(type_bytes, rd=rd) expectued_check_sum = did_type.hasher.hash(type_bytes + pk_hash)[ 0:4] if actual_check_sum == expectued_check_sum: return True return False except Exception as e: logger.error('Fail to verify did {}'.format(did)) logger.error(e, exc_info=True) return False def _type_to_bytes(self): role_bits = lib.to_six_bits(ROLE_MAP.get(self.role_type)) key_bits = lib.to_five_bits(KEY_MAP.get(self.key_type)) hash_bits = lib.to_five_bits(HASH_MAP.get(self.hash_type)) first_byte = bytes([int((role_bits + key_bits[0:2]), 2)]) second_byte = bytes([int((key_bits[2:] + hash_bits), 2)]) return first_byte + second_byte @staticmethod def _bytes_to_type(input_bytes, rd): bits = bin(utils.bytes_to_int(input_bytes) | 65536)[3:] role_type = lib.get_did_type_key(ROLE_MAP, bits[0:6]) key_type = lib.get_did_type_key(KEY_MAP, bits[6:11]) hash_type = lib.get_did_type_key(HASH_MAP, bits[11:16]) return AbtDid(role_type=role_type, key_type=key_type, hash_type=hash_type, rd=rd) def gen_and_sign(self, sk, extra): """ Generate and Sign JWT token Args: sk(bytes): secret key extra(dict): additional data to be included in the token Returns: string Examples: >>> import base64 >>> sk = base64.b16decode('5C57BE8571841383774398891CA42917924B244513FB923E201D60D8795F682EF04A5204D6C529FBB3C435F62E042DB4E6D2BBF839A723A83A8B30740F0AD524') >>> res = AbtDid().gen_and_sign(sk, {'origin': 'testdata'}) >>> res.split(".")[0] 'eyJhbGciOiAiRWQyNTUxOSIsICJ0eXAiOiAiSldUIn0' """ now = round(datetime.now(timezone.utc).timestamp()) middle = lib.clean_dict({'iss': self.sk_to_did(sk), 'iat': now, 'nbf': now, 'exp': now + AbtDid.MIN_30, **extra, }) body = utils.multibase_b64encode(json.dumps(middle)) data = self._header() + '.' + body signature = utils.multibase_b64encode( self.signer.sign(data.encode(), sk)) return data + '.' + signature def _header(self): if self.key_type == 'ed25519': alg = 'Ed25519' elif self.key_type == 'secp256k1': alg = 'ES256K' return utils.multibase_b64encode(json.dumps({ 'alg': alg, 'typ': 'JWT' })) @staticmethod def verify(token, pk): """ Verify if the token matches the public key Args: token(string): JWT token pk(bytes): public key Returns: bool Examples: >>> import base64 >>> token='eyJhbGciOiAiRWQyNTUxOSIsICJ0eXAiOiAiSldUIn0.eyJpc3MiOiAiZGlkOmFidDp6MWYyaFB1ZEZnanRhOGNkRVYyeFRZaGRhcjNEb2ZxSGhkNiIsICJpYXQiOiAxNTU2NzcyNDE1LCAibmJmIjogMTU1Njc3MjQxNSwgImV4cCI6IDE1NTY3NzQyMTUsICJvcmlnaW4iOiAidGVzdGRhdGEifQ.sdbRA4_-gtMhlTRqhNzxnqYG-sFl3EGFOpVcsX6sSZ0E_33k6ga8jPTmNMkRz3DdFwnW_M62oK_-nFSw9wJQBw' >>> pk =base64.b16decode('F04A5204D6C529FBB3C435F62E042DB4E6D2BBF839A723A83A8B30740F0AD524') >>> AbtDid.verify(token, pk) True """ try: header, body, signature = token.split('.') alg = lib.b64decode_to_dict(header).get('alg').lower() if alg == 'secp256k1' or alg == 'es256k': signer = Signer('secp256k1') elif alg == 'ed25519': signer = Signer('ed25519') sig = utils.multibase_b64decode(signature) is_sig_valid = signer.verify((header + '.' + body).encode(), sig, pk) did = lib.b64decode_to_dict(body).get('iss') if is_sig_valid and AbtDid.is_match_pk(did, pk): return True return False except Exception as e: logger.error(e, exc_info=True) logger.error("Fail to verify token {0} and pk {1}" .format(token, pk)) return False
try: decoded = utils.multibase_b58decode(did) type_bytes = decoded[0:2]
get_build_step.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs

__all__ = [
    'GetBuildStepResult',
    'AwaitableGetBuildStepResult',
    'get_build_step',
]

@pulumi.output_type
class GetBuildStepResult:
    """
    Build step resource properties
    """
    def __init__(__self__, id=None, name=None, properties=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
raise TypeError("Expected argument 'properties' to be a dict") pulumi.set(__self__, "properties", properties) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: """ The resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ The name of the resource. """ return pulumi.get(self, "name") @property @pulumi.getter def properties(self) -> 'outputs.DockerBuildStepResponse': """ The properties of a build step. """ return pulumi.get(self, "properties") @property @pulumi.getter def type(self) -> str: """ The type of the resource. """ return pulumi.get(self, "type") class AwaitableGetBuildStepResult(GetBuildStepResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetBuildStepResult( id=self.id, name=self.name, properties=self.properties, type=self.type) def get_build_step(build_task_name: Optional[str] = None, registry_name: Optional[str] = None, resource_group_name: Optional[str] = None, step_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBuildStepResult: """ Build step resource properties API Version: 2018-02-01-preview. :param str build_task_name: The name of the container registry build task. :param str registry_name: The name of the container registry. :param str resource_group_name: The name of the resource group to which the container registry belongs. :param str step_name: The name of a build step for a container registry build task. """ __args__ = dict() __args__['buildTaskName'] = build_task_name __args__['registryName'] = registry_name __args__['resourceGroupName'] = resource_group_name __args__['stepName'] = step_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:containerregistry:getBuildStep', __args__, opts=opts, typ=GetBuildStepResult).value return AwaitableGetBuildStepResult( id=__ret__.id, name=__ret__.name, properties=__ret__.properties, type=__ret__.type)
if properties and not isinstance(properties, dict):
edit.js
const request = require('request-promise-native');
const config = require('config');
const fs = require('fs');
const editor = require('editor');
const { promisify } = require('util');

const writeFile = promisify(fs.writeFile);
const readFile = promisify(fs.readFile);
const unlink = promisify(fs.unlink);

exports.desc = 'Edit a single file from your ReadMe project without saving locally';
exports.category = 'services';
exports.weight = 4;
exports.action = 'docs:edit';

exports.run = async function({ args, opts }) {
  const { key, version } = opts;

  if (!key) {
    return Promise.reject(new Error('No api key provided. Please use --key'));
  }

  if (!version) {
    return Promise.reject(new Error('No version provided. Please use --version'));
  }

  if (!args[0]) {
    return Promise.reject(new Error('No slug provided. Usage `rdme docs:edit <slug>`'));
  }

  const slug = args[0];
  const filename = `${slug}.md`;

  const options = {
    auth: { user: key },
    headers: {
      'x-readme-version': version,
    },
  };

  const existingDoc = await request
    .get(`${config.host}/api/v1/docs/${slug}`, {
      json: true,
      ...options,
    })
    .catch(err => {
      if (err.statusCode === 404) {
      return Promise.reject(err);
    });

  await writeFile(filename, existingDoc.body);

  return new Promise((resolve, reject) => {
    (opts.mockEditor || editor)(filename, async code => {
      if (code !== 0) return reject(new Error('Non zero exit code from $EDITOR'));

      const updatedDoc = await readFile(filename, 'utf8');

      return request
        .put(`${config.host}/api/v1/docs/${slug}`, {
          json: Object.assign(existingDoc, {
            body: updatedDoc,
          }),
          ...options,
        })
        .then(async () => {
          console.log('Doc successfully updated. Cleaning up local file');
          await unlink(filename);
          return resolve();
        })
        .catch(err => {
          if (err.statusCode === 400) {
            return reject(err.error);
          }
          return reject(err);
        });
    });
  });
};
return Promise.reject(err.error); }
error.rs
//! Error reporting. For now very stupid and simplistic.

use grammar::repr::Grammar;
use std::io::{self, Write};

use super::{Action, TableConstructionError};

pub fn report_error<'grammar>(out: &mut Write,
                              _grammar: &'grammar Grammar,
                              error: &TableConstructionError<'grammar>)
                              -> io::Result<()>
{
    try!(writeln!(out, "when in this state:"));
    for item in error.items.vec.iter() {
        try!(writeln!(out, " {:?}", item));
    }
    try!(writeln!(out, "and looking at a token `{:?}`,", error.lookahead));
    try!(writeln!(out, "we can reduce to a `{}`", error.production.nonterminal));
    match error.conflict {
        Action::Shift(_) => try!(writeln!(out, "but we can also shift")),
        Action::Reduce(prod) => try!(writeln!(out, "but we can also reduce to a `{}`",
                                              prod.nonterminal)),
    }
    Ok(())
}
ast_util.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use ast::*; use ast; use ast_util; use codemap::Span; use opt_vec; use parse::token; use print::pprust; use visit::Visitor; use visit; use std::cell::{Cell, RefCell}; use std::cmp; use std::hashmap::HashMap; use std::u32; use std::local_data; pub fn path_name_i(idents: &[Ident]) -> ~str { // FIXME: Bad copies (#2543 -- same for everything else that says "bad") idents.map(|i| { token::get_ident(*i).get().to_str() }).connect("::") } // totally scary function: ignores all but the last element, should have // a different name pub fn path_to_ident(path: &Path) -> Ident { path.segments.last().unwrap().identifier } pub fn local_def(id: NodeId) -> DefId { ast::DefId { krate: LOCAL_CRATE, node: id } } pub fn is_local(did: ast::DefId) -> bool { did.krate == LOCAL_CRATE } pub fn stmt_id(s: &Stmt) -> NodeId { match s.node { StmtDecl(_, id) => id, StmtExpr(_, id) => id, StmtSemi(_, id) => id, StmtMac(..) => fail!("attempted to analyze unexpanded stmt") } } pub fn variant_def_ids(d: Def) -> Option<(DefId, DefId)> { match d { DefVariant(enum_id, var_id, _) => { Some((enum_id, var_id)) } _ => None } } pub fn def_id_of_def(d: Def) -> DefId { match d { DefFn(id, _) | DefStaticMethod(id, _, _) | DefMod(id) | DefForeignMod(id) | DefStatic(id, _) | DefVariant(_, id, _) | DefTy(id) | DefTyParam(id, _) | DefUse(id) | DefStruct(id) | DefTrait(id) | DefMethod(id, _) => { id } DefArg(id, _) | DefLocal(id, _) | DefSelfTy(id) | DefUpvar(id, _, _, _) | DefBinding(id, _) | DefRegion(id) | DefTyParamBinder(id) | DefLabel(id) => { local_def(id) } DefPrimTy(_) => fail!() } } pub fn binop_to_str(op: BinOp) -> ~str { match op { BiAdd => return ~"+", BiSub => return ~"-", BiMul => return ~"*", BiDiv => return ~"/", BiRem => return ~"%", BiAnd => return ~"&&", BiOr => return ~"||", BiBitXor => return ~"^", BiBitAnd => return ~"&", BiBitOr => return ~"|", BiShl => return ~"<<", BiShr => return ~">>", BiEq => return ~"==", BiLt => return ~"<", BiLe => return ~"<=", BiNe => return ~"!=", BiGe => return ~">=", BiGt => return ~">" } } pub fn binop_to_method_name(op: BinOp) -> Option<~str> { match op { BiAdd => return Some(~"add"), BiSub => return Some(~"sub"), BiMul => return Some(~"mul"), BiDiv => return Some(~"div"), BiRem => return Some(~"rem"), BiBitXor => return Some(~"bitxor"), BiBitAnd => return Some(~"bitand"), BiBitOr => return Some(~"bitor"), BiShl => return Some(~"shl"), BiShr => return Some(~"shr"), BiLt => return Some(~"lt"), BiLe => return Some(~"le"), BiGe => return Some(~"ge"), BiGt => return Some(~"gt"), BiEq => return Some(~"eq"), BiNe => return Some(~"ne"), BiAnd | BiOr => return None } } pub fn lazy_binop(b: BinOp) -> bool { match b { BiAnd => true, BiOr => true, _ => false } } pub fn is_shift_binop(b: BinOp) -> bool { match b { BiShl => true, BiShr => true, _ => false } } pub fn unop_to_str(op: UnOp) -> &'static str { match op { UnBox => "@", UnUniq => "~", UnDeref => "*", UnNot => "!", UnNeg => "-", } } pub fn is_path(e: @Expr) -> bool { return match e.node { ExprPath(_) => true, _ => false }; } pub fn int_ty_to_str(t: IntTy) -> ~str { match t { 
TyI => ~"", TyI8 => ~"i8", TyI16 => ~"i16", TyI32 => ~"i32", TyI64 => ~"i64" } } pub fn int_ty_max(t: IntTy) -> u64 { match t { TyI8 => 0x80u64, TyI16 => 0x8000u64, TyI | TyI32 => 0x80000000u64, // actually ni about TyI TyI64 => 0x8000000000000000u64 } } pub fn uint_ty_to_str(t: UintTy) -> ~str { match t { TyU => ~"u", TyU8 => ~"u8", TyU16 => ~"u16", TyU32 => ~"u32", TyU64 => ~"u64" } } pub fn uint_ty_max(t: UintTy) -> u64 { match t { TyU8 => 0xffu64, TyU16 => 0xffffu64, TyU | TyU32 => 0xffffffffu64, // actually ni about TyU TyU64 => 0xffffffffffffffffu64 } } pub fn float_ty_to_str(t: FloatTy) -> ~str { match t { TyF32 => ~"f32", TyF64 => ~"f64" } } pub fn is_call_expr(e: @Expr) -> bool { match e.node { ExprCall(..) => true, _ => false } } pub fn block_from_expr(e: @Expr) -> P<Block> { P(Block { view_items: ~[], stmts: ~[], expr: Some(e), id: e.id, rules: DefaultBlock, span: e.span }) } pub fn ident_to_path(s: Span, identifier: Ident) -> Path { ast::Path { span: s, global: false, segments: ~[ ast::PathSegment { identifier: identifier, lifetimes: opt_vec::Empty, types: opt_vec::Empty, } ], } } pub fn ident_to_pat(id: NodeId, s: Span, i: Ident) -> @Pat { @ast::Pat { id: id, node: PatIdent(BindByValue(MutImmutable), ident_to_path(s, i), None), span: s } } pub fn is_unguarded(a: &Arm) -> bool { match a.guard { None => true, _ => false } } pub fn unguarded_pat(a: &Arm) -> Option<~[@Pat]> { if is_unguarded(a) { Some(/* FIXME (#2543) */ a.pats.clone()) } else { None } } /// Generate a "pretty" name for an `impl` from its type and trait. /// This is designed so that symbols of `impl`'d methods give some /// hint of where they came from, (previously they would all just be /// listed as `__extensions__::method_name::hash`, with no indication /// of the type). pub fn impl_pretty_name(trait_ref: &Option<TraitRef>, ty: &Ty) -> Ident { let mut pretty = pprust::ty_to_str(ty); match *trait_ref { Some(ref trait_ref) => { pretty.push_char('.'); pretty.push_str(pprust::path_to_str(&trait_ref.path)); } None => {} } token::gensym_ident(pretty) } pub fn public_methods(ms: ~[@Method]) -> ~[@Method] { ms.move_iter().filter(|m| { match m.vis { Public => true, _ => false } }).collect() } // extract a TypeMethod from a TraitMethod. if the TraitMethod is // a default, pull out the useful fields to make a TypeMethod pub fn trait_method_to_ty_method(method: &TraitMethod) -> TypeMethod { match *method { Required(ref m) => (*m).clone(), Provided(ref m) => { TypeMethod { ident: m.ident, attrs: m.attrs.clone(), purity: m.purity, decl: m.decl, generics: m.generics.clone(), explicit_self: m.explicit_self, id: m.id, span: m.span, } } } } pub fn split_trait_methods(trait_methods: &[TraitMethod]) -> (~[TypeMethod], ~[@Method]) { let mut reqd = ~[]; let mut provd = ~[]; for trt_method in trait_methods.iter() { match *trt_method { Required(ref tm) => reqd.push((*tm).clone()), Provided(m) => provd.push(m) } }; (reqd, provd) } pub fn struct_field_visibility(field: ast::StructField) -> Visibility { match field.node.kind { ast::NamedField(_, visibility) => visibility, ast::UnnamedField => ast::Public } } /// Maps a binary operator to its precedence pub fn operator_prec(op: ast::BinOp) -> uint { match op { // 'as' sits here with 12 BiMul | BiDiv | BiRem => 11u, BiAdd | BiSub => 10u, BiShl | BiShr => 9u, BiBitAnd => 8u, BiBitXor => 7u, BiBitOr => 6u, BiLt | BiLe | BiGe | BiGt => 4u, BiEq | BiNe => 3u, BiAnd => 2u, BiOr => 1u } } /// Precedence of the `as` operator, which is a binary operator /// not appearing in the prior table. 
pub static as_prec: uint = 12u; pub fn empty_generics() -> Generics { Generics {lifetimes: opt_vec::Empty, ty_params: opt_vec::Empty} } // ______________________________________________________________________ // Enumerating the IDs which appear in an AST #[deriving(Encodable, Decodable)] pub struct IdRange { min: NodeId, max: NodeId, } impl IdRange { pub fn max() -> IdRange { IdRange { min: u32::MAX, max: u32::MIN, } } pub fn empty(&self) -> bool { self.min >= self.max } pub fn add(&mut self, id: NodeId) { self.min = cmp::min(self.min, id); self.max = cmp::max(self.max, id + 1); } } pub trait IdVisitingOperation { fn visit_id(&self, node_id: NodeId); } pub struct IdVisitor<'a, O> { operation: &'a O, pass_through_items: bool, visited_outermost: bool, } impl<'a, O: IdVisitingOperation> IdVisitor<'a, O> { fn visit_generics_helper(&self, generics: &Generics) { for type_parameter in generics.ty_params.iter() { self.operation.visit_id(type_parameter.id) } for lifetime in generics.lifetimes.iter() { self.operation.visit_id(lifetime.id) } } } impl<'a, O: IdVisitingOperation> Visitor<()> for IdVisitor<'a, O> { fn visit_mod(&mut self, module: &Mod, _: Span, node_id: NodeId, env: ()) { self.operation.visit_id(node_id); visit::walk_mod(self, module, env) } fn visit_view_item(&mut self, view_item: &ViewItem, env: ()) { match view_item.node { ViewItemExternMod(_, _, node_id) => { self.operation.visit_id(node_id) } ViewItemUse(ref view_paths) => { for view_path in view_paths.iter() { match view_path.node { ViewPathSimple(_, _, node_id) | ViewPathGlob(_, node_id) => { self.operation.visit_id(node_id) } ViewPathList(_, ref paths, node_id) => { self.operation.visit_id(node_id); for path in paths.iter() { self.operation.visit_id(path.node.id) } } } } } } visit::walk_view_item(self, view_item, env) } fn visit_foreign_item(&mut self, foreign_item: &ForeignItem, env: ()) { self.operation.visit_id(foreign_item.id); visit::walk_foreign_item(self, foreign_item, env) } fn visit_item(&mut self, item: &Item, env: ()) { if !self.pass_through_items { if self.visited_outermost { return } else { self.visited_outermost = true } } self.operation.visit_id(item.id); match item.node { ItemEnum(ref enum_definition, _) => { for variant in enum_definition.variants.iter() { self.operation.visit_id(variant.node.id) } } _ => {} } visit::walk_item(self, item, env); self.visited_outermost = false } fn visit_local(&mut self, local: &Local, env: ()) { self.operation.visit_id(local.id); visit::walk_local(self, local, env) } fn visit_block(&mut self, block: &Block, env: ()) { self.operation.visit_id(block.id); visit::walk_block(self, block, env) } fn visit_stmt(&mut self, statement: &Stmt, env: ()) { self.operation.visit_id(ast_util::stmt_id(statement)); visit::walk_stmt(self, statement, env) } fn visit_pat(&mut self, pattern: &Pat, env: ()) { self.operation.visit_id(pattern.id); visit::walk_pat(self, pattern, env) } fn visit_expr(&mut self, expression: &Expr, env: ()) { { let optional_callee_id = expression.get_callee_id(); for callee_id in optional_callee_id.iter() { self.operation.visit_id(*callee_id) } } self.operation.visit_id(expression.id); visit::walk_expr(self, expression, env) } fn
(&mut self, typ: &Ty, env: ()) { self.operation.visit_id(typ.id); match typ.node { TyPath(_, _, id) => self.operation.visit_id(id), _ => {} } visit::walk_ty(self, typ, env) } fn visit_generics(&mut self, generics: &Generics, env: ()) { self.visit_generics_helper(generics); visit::walk_generics(self, generics, env) } fn visit_fn(&mut self, function_kind: &visit::FnKind, function_declaration: &FnDecl, block: &Block, span: Span, node_id: NodeId, env: ()) { if !self.pass_through_items { match *function_kind { visit::FkMethod(..) if self.visited_outermost => return, visit::FkMethod(..) => self.visited_outermost = true, _ => {} } } self.operation.visit_id(node_id); match *function_kind { visit::FkItemFn(_, generics, _, _) | visit::FkMethod(_, generics, _) => { self.visit_generics_helper(generics) } visit::FkFnBlock => {} } for argument in function_declaration.inputs.iter() { self.operation.visit_id(argument.id) } visit::walk_fn(self, function_kind, function_declaration, block, span, node_id, env); if !self.pass_through_items { match *function_kind { visit::FkMethod(..) => self.visited_outermost = false, _ => {} } } } fn visit_struct_field(&mut self, struct_field: &StructField, env: ()) { self.operation.visit_id(struct_field.node.id); visit::walk_struct_field(self, struct_field, env) } fn visit_struct_def(&mut self, struct_def: &StructDef, ident: ast::Ident, generics: &ast::Generics, id: NodeId, _: ()) { self.operation.visit_id(id); struct_def.ctor_id.map(|ctor_id| self.operation.visit_id(ctor_id)); visit::walk_struct_def(self, struct_def, ident, generics, id, ()); } fn visit_trait_method(&mut self, tm: &ast::TraitMethod, _: ()) { match *tm { ast::Required(ref m) => self.operation.visit_id(m.id), ast::Provided(ref m) => self.operation.visit_id(m.id), } visit::walk_trait_method(self, tm, ()); } } pub fn visit_ids_for_inlined_item<O: IdVisitingOperation>(item: &InlinedItem, operation: &O) { let mut id_visitor = IdVisitor { operation: operation, pass_through_items: true, visited_outermost: false, }; visit::walk_inlined_item(&mut id_visitor, item, ()); } struct IdRangeComputingVisitor { result: Cell<IdRange>, } impl IdVisitingOperation for IdRangeComputingVisitor { fn visit_id(&self, id: NodeId) { let mut id_range = self.result.get(); id_range.add(id); self.result.set(id_range) } } pub fn compute_id_range_for_inlined_item(item: &InlinedItem) -> IdRange { let visitor = IdRangeComputingVisitor { result: Cell::new(IdRange::max()) }; visit_ids_for_inlined_item(item, &visitor); visitor.result.get() } pub fn is_item_impl(item: @ast::Item) -> bool { match item.node { ItemImpl(..) 
=> true, _ => false } } pub fn walk_pat(pat: &Pat, it: |&Pat| -> bool) -> bool { if !it(pat) { return false; } match pat.node { PatIdent(_, _, Some(p)) => walk_pat(p, it), PatStruct(_, ref fields, _) => { fields.iter().advance(|f| walk_pat(f.pat, |p| it(p))) } PatEnum(_, Some(ref s)) | PatTup(ref s) => { s.iter().advance(|&p| walk_pat(p, |p| it(p))) } PatUniq(s) | PatRegion(s) => { walk_pat(s, it) } PatVec(ref before, ref slice, ref after) => { before.iter().advance(|&p| walk_pat(p, |p| it(p))) && slice.iter().advance(|&p| walk_pat(p, |p| it(p))) && after.iter().advance(|&p| walk_pat(p, |p| it(p))) } PatWild | PatWildMulti | PatLit(_) | PatRange(_, _) | PatIdent(_, _, _) | PatEnum(_, _) => { true } } } pub trait EachViewItem { fn each_view_item(&self, f: |&ast::ViewItem| -> bool) -> bool; } struct EachViewItemData<'a> { callback: 'a |&ast::ViewItem| -> bool, } impl<'a> Visitor<()> for EachViewItemData<'a> { fn visit_view_item(&mut self, view_item: &ast::ViewItem, _: ()) { let _ = (self.callback)(view_item); } } impl EachViewItem for ast::Crate { fn each_view_item(&self, f: |&ast::ViewItem| -> bool) -> bool { let mut visit = EachViewItemData { callback: f, }; visit::walk_crate(&mut visit, self, ()); true } } pub fn view_path_id(p: &ViewPath) -> NodeId { match p.node { ViewPathSimple(_, _, id) | ViewPathGlob(_, id) | ViewPathList(_, _, id) => id } } /// Returns true if the given struct def is tuple-like; i.e. that its fields /// are unnamed. pub fn struct_def_is_tuple_like(struct_def: &ast::StructDef) -> bool { struct_def.ctor_id.is_some() } /// Returns true if the given pattern consists solely of an identifier /// and false otherwise. pub fn pat_is_ident(pat: @ast::Pat) -> bool { match pat.node { ast::PatIdent(..) => true, _ => false, } } // HYGIENE FUNCTIONS /// Extend a syntax context with a given mark pub fn new_mark(m:Mrk, tail:SyntaxContext) -> SyntaxContext { new_mark_internal(m,tail,get_sctable()) } // Extend a syntax context with a given mark and table // FIXME #8215 : currently pub to allow testing pub fn new_mark_internal(m: Mrk, tail: SyntaxContext, table: &SCTable) -> SyntaxContext { let key = (tail,m); // FIXME #5074 : can't use more natural style because we're missing // flow-sensitivity. Results in two lookups on a hash table hit. // also applies to new_rename_internal, below. 
// let try_lookup = table.mark_memo.find(&key); let mut mark_memo = table.mark_memo.borrow_mut(); match mark_memo.get().contains_key(&key) { false => { let new_idx = { let mut table = table.table.borrow_mut(); idx_push(table.get(), Mark(m,tail)) }; mark_memo.get().insert(key,new_idx); new_idx } true => { match mark_memo.get().find(&key) { None => fail!("internal error: key disappeared 2013042901"), Some(idxptr) => {*idxptr} } } } } /// Extend a syntax context with a given rename pub fn new_rename(id:Ident, to:Name, tail:SyntaxContext) -> SyntaxContext { new_rename_internal(id, to, tail, get_sctable()) } // Extend a syntax context with a given rename and sctable // FIXME #8215 : currently pub to allow testing pub fn new_rename_internal(id: Ident, to: Name, tail: SyntaxContext, table: &SCTable) -> SyntaxContext { let key = (tail,id,to); // FIXME #5074 //let try_lookup = table.rename_memo.find(&key); let mut rename_memo = table.rename_memo.borrow_mut(); match rename_memo.get().contains_key(&key) { false => { let new_idx = { let mut table = table.table.borrow_mut(); idx_push(table.get(), Rename(id,to,tail)) }; rename_memo.get().insert(key,new_idx); new_idx } true => { match rename_memo.get().find(&key) { None => fail!("internal error: key disappeared 2013042902"), Some(idxptr) => {*idxptr} } } } } /// Make a fresh syntax context table with EmptyCtxt in slot zero /// and IllegalCtxt in slot one. // FIXME #8215 : currently pub to allow testing pub fn new_sctable_internal() -> SCTable { SCTable { table: RefCell::new(~[EmptyCtxt,IllegalCtxt]), mark_memo: RefCell::new(HashMap::new()), rename_memo: RefCell::new(HashMap::new()), } } // fetch the SCTable from TLS, create one if it doesn't yet exist. pub fn get_sctable() -> @SCTable { local_data_key!(sctable_key: @@SCTable) match local_data::get(sctable_key, |k| k.map(|k| *k)) { None => { let new_table = @@new_sctable_internal(); local_data::set(sctable_key,new_table); *new_table }, Some(intr) => *intr } } /// print out an SCTable for debugging pub fn display_sctable(table : &SCTable) { error!("SC table:"); let table = table.table.borrow(); for (idx,val) in table.get().iter().enumerate() { error!("{:4u} : {:?}",idx,val); } } /// Add a value to the end of a vec, return its index fn idx_push<T>(vec: &mut ~[T], val: T) -> u32 { vec.push(val); (vec.len() - 1) as u32 } /// Resolve a syntax object to a name, per MTWT. pub fn mtwt_resolve(id : Ident) -> Name { let resolve_table = get_resolve_table(); let mut resolve_table = resolve_table.borrow_mut(); resolve_internal(id, get_sctable(), resolve_table.get()) } // FIXME #8215: must be pub for testing pub type ResolveTable = HashMap<(Name,SyntaxContext),Name>; // okay, I admit, putting this in TLS is not so nice: // fetch the SCTable from TLS, create one if it doesn't yet exist. pub fn get_resolve_table() -> @RefCell<ResolveTable> { local_data_key!(resolve_table_key: @@RefCell<ResolveTable>) match local_data::get(resolve_table_key, |k| k.map(|k| *k)) { None => { let new_table = @@RefCell::new(HashMap::new()); local_data::set(resolve_table_key, new_table); *new_table }, Some(intr) => *intr } } // Resolve a syntax object to a name, per MTWT. // adding memoization to possibly resolve 500+ seconds in resolve for librustc (!) 
// FIXME #8215 : currently pub to allow testing pub fn resolve_internal(id : Ident, table : &SCTable, resolve_table : &mut ResolveTable) -> Name { let key = (id.name,id.ctxt); match resolve_table.contains_key(&key) { false => { let resolved = { let result = { let table = table.table.borrow(); table.get()[id.ctxt] }; match result { EmptyCtxt => id.name, // ignore marks here: Mark(_,subctxt) => resolve_internal(Ident{name:id.name, ctxt: subctxt},table,resolve_table), // do the rename if necessary: Rename(Ident{name,ctxt},toname,subctxt) => { let resolvedfrom = resolve_internal(Ident{name:name,ctxt:ctxt},table,resolve_table); let resolvedthis = resolve_internal(Ident{name:id.name,ctxt:subctxt},table,resolve_table); if (resolvedthis == resolvedfrom) && (marksof(ctxt,resolvedthis,table) == marksof(subctxt,resolvedthis,table)) { toname } else { resolvedthis } } IllegalCtxt() => fail!("expected resolvable context, got IllegalCtxt") } }; resolve_table.insert(key,resolved); resolved } true => { // it's guaranteed to be there, because we just checked that it was // there and we never remove anything from the table: *(resolve_table.find(&key).unwrap()) } } } /// Compute the marks associated with a syntax context. pub fn mtwt_marksof(ctxt: SyntaxContext, stopname: Name) -> ~[Mrk] { marksof(ctxt, stopname, get_sctable()) } // the internal function for computing marks // it's not clear to me whether it's better to use a [] mutable // vector or a cons-list for this. pub fn marksof(ctxt: SyntaxContext, stopname: Name, table: &SCTable) -> ~[Mrk] { let mut result = ~[]; let mut loopvar = ctxt; loop { let table_entry = { let table = table.table.borrow(); table.get()[loopvar] }; match table_entry { EmptyCtxt => { return result; }, Mark(mark, tl) => { xorPush(&mut result, mark); loopvar = tl; }, Rename(_,name,tl) => { // see MTWT for details on the purpose of the stopname. // short version: it prevents duplication of effort. if name == stopname { return result; } else { loopvar = tl; } } IllegalCtxt => fail!("expected resolvable context, got IllegalCtxt") } } } /// Return the outer mark for a context with a mark at the outside. /// FAILS when outside is not a mark. pub fn mtwt_outer_mark(ctxt: SyntaxContext) -> Mrk { let sctable = get_sctable(); let table = sctable.table.borrow(); match table.get()[ctxt] { ast::Mark(mrk,_) => mrk, _ => fail!("can't retrieve outer mark when outside is not a mark") } } /// Push a name... unless it matches the one on top, in which /// case pop and discard (so two of the same marks cancel) pub fn xorPush(marks: &mut ~[Mrk], mark: Mrk) { if (marks.len() > 0) && (getLast(marks) == mark) { marks.pop().unwrap(); } else { marks.push(mark); } } // get the last element of a mutable array. // FIXME #4903: , must be a separate procedure for now. pub fn getLast(arr: &~[Mrk]) -> Mrk { *arr.last().unwrap() } // are two paths equal when compared unhygienically? // since I'm using this to replace ==, it seems appropriate // to compare the span, global, etc. fields as well. pub fn path_name_eq(a : &ast::Path, b : &ast::Path) -> bool { (a.span == b.span) && (a.global == b.global) && (segments_name_eq(a.segments, b.segments)) } // are two arrays of segments equal when compared unhygienically? pub fn segments_name_eq(a : &[ast::PathSegment], b : &[ast::PathSegment]) -> bool { if a.len() != b.len() { false } else { for (idx,seg) in a.iter().enumerate() { if (seg.identifier.name != b[idx].identifier.name) // FIXME #7743: ident -> name problems in lifetime comparison? 
|| (seg.lifetimes != b[idx].lifetimes) // can types contain idents? || (seg.types != b[idx].types) { return false; } } true } } // Returns true if this literal is a string and false otherwise. pub fn lit_is_str(lit: @Lit) -> bool { match lit.node { LitStr(..) => true, _ => false, } } #[cfg(test)] mod test { use ast::*; use super::*; use opt_vec; use std::hashmap::HashMap; fn ident_to_segment(id : &Ident) -> PathSegment { PathSegment {identifier:id.clone(), lifetimes: opt_vec::Empty, types: opt_vec::Empty} } #[test] fn idents_name_eq_test() { assert!(segments_name_eq([Ident{name:3,ctxt:4}, Ident{name:78,ctxt:82}].map(ident_to_segment), [Ident{name:3,ctxt:104}, Ident{name:78,ctxt:182}].map(ident_to_segment))); assert!(!segments_name_eq([Ident{name:3,ctxt:4}, Ident{name:78,ctxt:82}].map(ident_to_segment), [Ident{name:3,ctxt:104}, Ident{name:77,ctxt:182}].map(ident_to_segment))); } #[test] fn xorpush_test () { let mut s = ~[]; xorPush(&mut s, 14); assert_eq!(s.clone(), ~[14]); xorPush(&mut s, 14); assert_eq!(s.clone(), ~[]); xorPush(&mut s, 14); assert_eq!(s.clone(), ~[14]); xorPush(&mut s, 15); assert_eq!(s.clone(), ~[14, 15]); xorPush(&mut s, 16); assert_eq!(s.clone(), ~[14, 15, 16]); xorPush(&mut s, 16); assert_eq!(s.clone(), ~[14, 15]); xorPush(&mut s, 15); assert_eq!(s.clone(), ~[14]); } fn id(n: Name, s: SyntaxContext) -> Ident { Ident {name: n, ctxt: s} } // because of the SCTable, I now need a tidy way of // creating syntax objects. Sigh. #[deriving(Clone, Eq)] enum TestSC { M(Mrk), R(Ident,Name) } // unfold a vector of TestSC values into a SCTable, // returning the resulting index fn unfold_test_sc(tscs : ~[TestSC], tail: SyntaxContext, table: &SCTable) -> SyntaxContext { tscs.rev_iter().fold(tail, |tail : SyntaxContext, tsc : &TestSC| {match *tsc { M(mrk) => new_mark_internal(mrk,tail,table), R(ident,name) => new_rename_internal(ident,name,tail,table)}}) } // gather a SyntaxContext back into a vector of TestSCs fn refold_test_sc(mut sc: SyntaxContext, table : &SCTable) -> ~[TestSC] { let mut result = ~[]; loop { let table = table.table.borrow(); match table.get()[sc] { EmptyCtxt => {return result;}, Mark(mrk,tail) => { result.push(M(mrk)); sc = tail; continue; }, Rename(id,name,tail) => { result.push(R(id,name)); sc = tail; continue; } IllegalCtxt => fail!("expected resolvable context, got IllegalCtxt") } } } #[test] fn test_unfold_refold(){ let mut t = new_sctable_internal(); let test_sc = ~[M(3),R(id(101,0),14),M(9)]; assert_eq!(unfold_test_sc(test_sc.clone(),EMPTY_CTXT,&mut t),4); { let table = t.table.borrow(); assert_eq!(table.get()[2],Mark(9,0)); assert_eq!(table.get()[3],Rename(id(101,0),14,2)); assert_eq!(table.get()[4],Mark(3,3)); } assert_eq!(refold_test_sc(4,&t),test_sc); } // extend a syntax context with a sequence of marks given // in a vector. v[0] will be the outermost mark. fn unfold_marks(mrks: ~[Mrk], tail: SyntaxContext, table: &SCTable) -> SyntaxContext { mrks.rev_iter().fold(tail, |tail:SyntaxContext, mrk:&Mrk| {new_mark_internal(*mrk,tail,table)}) } #[test] fn unfold_marks_test() { let mut t = new_sctable_internal(); assert_eq!(unfold_marks(~[3,7],EMPTY_CTXT,&mut t),3); { let table = t.table.borrow(); assert_eq!(table.get()[2],Mark(7,0)); assert_eq!(table.get()[3],Mark(3,2)); } } #[test] fn test_marksof () { let stopname = 242; let name1 = 243; let mut t = new_sctable_internal(); assert_eq!(marksof (EMPTY_CTXT,stopname,&t),~[]); // FIXME #5074: ANF'd to dodge nested calls { let ans = unfold_marks(~[4,98],EMPTY_CTXT,&mut t); assert_eq! 
(marksof (ans,stopname,&t),~[4,98]);} // does xoring work? { let ans = unfold_marks(~[5,5,16],EMPTY_CTXT,&mut t); assert_eq! (marksof (ans,stopname,&t), ~[16]);} // does nested xoring work? { let ans = unfold_marks(~[5,10,10,5,16],EMPTY_CTXT,&mut t); assert_eq! (marksof (ans, stopname,&t), ~[16]);} // rename where stop doesn't match: { let chain = ~[M(9), R(id(name1, new_mark_internal (4, EMPTY_CTXT,&mut t)), 100101102), M(14)]; let ans = unfold_test_sc(chain,EMPTY_CTXT,&mut t); assert_eq! (marksof (ans, stopname, &t), ~[9,14]);} // rename where stop does match { let name1sc = new_mark_internal(4, EMPTY_CTXT, &mut t); let chain = ~[M(9), R(id(name1, name1sc), stopname), M(14)]; let ans = unfold_test_sc(chain,EMPTY_CTXT,&mut t); assert_eq! (marksof (ans, stopname, &t), ~[9]); } } #[test] fn resolve_tests () { let a = 40; let mut t = new_sctable_internal(); let mut rt = HashMap::new(); // - ctxt is MT assert_eq!(resolve_internal(id(a,EMPTY_CTXT),&mut t, &mut rt),a); // - simple ignored marks { let sc = unfold_marks(~[1,2,3],EMPTY_CTXT,&mut t); assert_eq!(resolve_internal(id(a,sc),&mut t, &mut rt),a);} // - orthogonal rename where names don't match { let sc = unfold_test_sc(~[R(id(50,EMPTY_CTXT),51),M(12)],EMPTY_CTXT,&mut t); assert_eq!(resolve_internal(id(a,sc),&mut t, &mut rt),a);} // - rename where names do match, but marks don't { let sc1 = new_mark_internal(1,EMPTY_CTXT,&mut t); let sc = unfold_test_sc(~[R(id(a,sc1),50), M(1), M(2)], EMPTY_CTXT,&mut t); assert_eq!(resolve_internal(id(a,sc),&mut t, &mut rt), a);} // - rename where names and marks match { let sc1 = unfold_test_sc(~[M(1),M(2)],EMPTY_CTXT,&mut t); let sc = unfold_test_sc(~[R(id(a,sc1),50),M(1),M(2)],EMPTY_CTXT,&mut t); assert_eq!(resolve_internal(id(a,sc),&mut t, &mut rt), 50); } // - rename where names and marks match by literal sharing { let sc1 = unfold_test_sc(~[M(1),M(2)],EMPTY_CTXT,&mut t); let sc = unfold_test_sc(~[R(id(a,sc1),50)],sc1,&mut t); assert_eq!(resolve_internal(id(a,sc),&mut t, &mut rt), 50); } // - two renames of the same var.. can only happen if you use // local-expand to prevent the inner binding from being renamed // during the rename-pass caused by the first: println!("about to run bad test"); { let sc = unfold_test_sc(~[R(id(a,EMPTY_CTXT),50), R(id(a,EMPTY_CTXT),51)], EMPTY_CTXT,&mut t); assert_eq!(resolve_internal(id(a,sc),&mut t, &mut rt), 51); } // the simplest double-rename: { let a_to_a50 = new_rename_internal(id(a,EMPTY_CTXT),50,EMPTY_CTXT,&mut t); let a50_to_a51 = new_rename_internal(id(a,a_to_a50),51,a_to_a50,&mut t); assert_eq!(resolve_internal(id(a,a50_to_a51),&mut t, &mut rt),51); // mark on the outside doesn't stop rename: let sc = new_mark_internal(9,a50_to_a51,&mut t); assert_eq!(resolve_internal(id(a,sc),&mut t, &mut rt),51); // but mark on the inside does: let a50_to_a51_b = unfold_test_sc(~[R(id(a,a_to_a50),51), M(9)], a_to_a50, &mut t); assert_eq!(resolve_internal(id(a,a50_to_a51_b),&mut t, &mut rt),50);} } #[test] fn mtwt_resolve_test(){ let a = 40; assert_eq!(mtwt_resolve(id(a,EMPTY_CTXT)),a); } #[test] fn hashing_tests () { let mut t = new_sctable_internal(); assert_eq!(new_mark_internal(12,EMPTY_CTXT,&mut t),2); assert_eq!(new_mark_internal(13,EMPTY_CTXT,&mut t),3); // using the same one again should result in the same index: assert_eq!(new_mark_internal(12,EMPTY_CTXT,&mut t),2); // I'm assuming that the rename table will behave the same.... 
} #[test] fn resolve_table_hashing_tests() { let mut t = new_sctable_internal(); let mut rt = HashMap::new(); assert_eq!(rt.len(),0); resolve_internal(id(30,EMPTY_CTXT),&mut t, &mut rt); assert_eq!(rt.len(),1); resolve_internal(id(39,EMPTY_CTXT),&mut t, &mut rt); assert_eq!(rt.len(),2); resolve_internal(id(30,EMPTY_CTXT),&mut t, &mut rt); assert_eq!(rt.len(),2); } }
visit_ty
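The suffix of this row contains the mark/rename machinery whose tests (`xorpush_test`) pin down one invariant worth calling out: pushing a mark that equals the one currently on top pops it instead, so two identical adjacent marks cancel. A minimal illustrative sketch of that rule in Python — the function and variable names below are mine, not from the Rust source:

def xor_push(marks: list[int], mark: int) -> None:
    # Push a mark unless it equals the one on top; in that case pop instead,
    # so two identical adjacent marks cancel (mirrors xorPush in the suffix above).
    if marks and marks[-1] == mark:
        marks.pop()
    else:
        marks.append(mark)

s: list[int] = []
xor_push(s, 14); assert s == [14]
xor_push(s, 14); assert s == []          # same mark twice cancels
xor_push(s, 14); xor_push(s, 15); xor_push(s, 16)
xor_push(s, 16); assert s == [14, 15]    # matches the xorpush_test expectations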
main.go
/* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "context" "encoding/json" "fmt" "net/http" "os" "os/signal" "runtime" "sync" "syscall" "time" "github.com/containerd/containerd" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/plugin" metrics "github.com/docker/go-metrics" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) const imageName = "docker.io/library/alpine:latest" var ( ct metrics.LabeledTimer execTimer metrics.LabeledTimer errCounter metrics.LabeledCounter binarySizeGauge metrics.LabeledGauge ) func init() { ns := metrics.NewNamespace("stress", "", nil) // if you want more fine grained metrics then you can drill down with the metrics in prom that // containerd is outputting ct = ns.NewLabeledTimer("run", "Run time of a full container during the test", "commit") execTimer = ns.NewLabeledTimer("exec", "Run time of an exec process during the test", "commit") binarySizeGauge = ns.NewLabeledGauge("binary_size", "Binary size of compiled binaries", metrics.Bytes, "name") errCounter = ns.NewLabeledCounter("errors", "Errors encountered running the stress tests", "err") metrics.Register(ns) // set higher ulimits if err := setRlimit(); err != nil { panic(err) } } type run struct { total int failures int started time.Time ended time.Time } func (r *run) start() { r.started = time.Now() } func (r *run) end() { r.ended = time.Now() } func (r *run) seconds() float64 { return r.ended.Sub(r.started).Seconds() } func (r *run) gather(workers []*worker) *result { for _, w := range workers { r.total += w.count r.failures += w.failures } sec := r.seconds() return &result{ Total: r.total, Seconds: sec, ContainersPerSecond: float64(r.total) / sec, SecondsPerContainer: sec / float64(r.total), } } type result struct { Total int `json:"total"` Failures int `json:"failures"` Seconds float64 `json:"seconds"` ContainersPerSecond float64 `json:"containersPerSecond"` SecondsPerContainer float64 `json:"secondsPerContainer"` ExecTotal int `json:"execTotal"` ExecFailures int `json:"execFailures"` } func main() { // morr power! 
runtime.GOMAXPROCS(runtime.NumCPU()) app := cli.NewApp() app.Name = "containerd-stress" app.Description = "stress test a containerd daemon" app.Flags = []cli.Flag{ cli.BoolFlag{ Name: "debug", Usage: "set debug output in the logs", }, cli.StringFlag{ Name: "address,a", Value: "/run/containerd/containerd.sock", Usage: "path to the containerd socket", }, cli.IntFlag{ Name: "concurrent,c", Value: 1, Usage: "set the concurrency of the stress test", }, cli.DurationFlag{ Name: "duration,d", Value: 1 * time.Minute, Usage: "set the duration of the stress test", }, cli.BoolFlag{ Name: "exec", Usage: "add execs to the stress tests", }, cli.BoolFlag{ Name: "json,j", Usage: "output results in json format", }, cli.StringFlag{ Name: "metrics,m", Usage: "address to serve the metrics API", }, cli.StringFlag{ Name: "runtime", Usage: "set the runtime to stress test", Value: plugin.RuntimeLinuxV1, }, } app.Before = func(context *cli.Context) error { if context.GlobalBool("json") { logrus.SetLevel(logrus.WarnLevel) } if context.GlobalBool("debug") { logrus.SetLevel(logrus.DebugLevel) } return nil } app.Commands = []cli.Command{ densityCommand, } app.Action = func(context *cli.Context) error { config := config{ Address: context.GlobalString("address"), Duration: context.GlobalDuration("duration"), Concurrency: context.GlobalInt("concurrent"), Exec: context.GlobalBool("exec"), JSON: context.GlobalBool("json"), Metrics: context.GlobalString("metrics"), Runtime: context.GlobalString("runtime"), } if config.Metrics != "" { return serve(config) } return test(config) } if err := app.Run(os.Args); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } } type config struct { Concurrency int Duration time.Duration Address string Exec bool JSON bool Metrics string Runtime string } func (c config) newClient() (*containerd.Client, error) { return containerd.New(c.Address, containerd.WithDefaultRuntime(c.Runtime)) } func serve(c config) error { go func() { if err := http.ListenAndServe(c.Metrics, metrics.Handler()); err != nil { logrus.WithError(err).Error("listen and serve") } }() checkBinarySizes() return test(c) } func test(c config) error { var ( wg sync.WaitGroup ctx = namespaces.WithNamespace(context.Background(), "stress") ) client, err := c.newClient() if err != nil {
if err := cleanup(ctx, client); err != nil { return err } logrus.Infof("pulling %s", imageName) image, err := client.Pull(ctx, imageName, containerd.WithPullUnpack) if err != nil { return err } tctx, cancel := context.WithTimeout(ctx, c.Duration) go func() { s := make(chan os.Signal, 1) signal.Notify(s, syscall.SIGTERM, syscall.SIGINT) <-s cancel() }() var ( workers []*worker r = &run{} ) logrus.Info("starting stress test run...") v, err := client.Version(ctx) if err != nil { return err } // create the workers along with their spec for i := 0; i < c.Concurrency; i++ { wg.Add(1) w := &worker{ id: i, wg: &wg, image: image, client: client, commit: v.Revision, } workers = append(workers, w) } var exec *execWorker if c.Exec { wg.Add(1) exec = &execWorker{ worker: worker{ id: c.Concurrency, wg: &wg, image: image, client: client, commit: v.Revision, }, } go exec.exec(ctx, tctx) } // start the timer and run the worker r.start() for _, w := range workers { go w.run(ctx, tctx) } // wait and end the timer wg.Wait() r.end() results := r.gather(workers) if c.Exec { results.ExecTotal = exec.count results.ExecFailures = exec.failures } logrus.Infof("ending test run in %0.3f seconds", results.Seconds) logrus.WithField("failures", r.failures).Infof( "create/start/delete %d containers in %0.3f seconds (%0.3f c/sec) or (%0.3f sec/c)", results.Total, results.Seconds, results.ContainersPerSecond, results.SecondsPerContainer, ) if c.JSON { if err := json.NewEncoder(os.Stdout).Encode(results); err != nil { return err } } return nil } // cleanup cleans up any containers in the "stress" namespace before the test run func cleanup(ctx context.Context, client *containerd.Client) error { containers, err := client.Containers(ctx) if err != nil { return err } for _, c := range containers { task, err := c.Task(ctx, nil) if err == nil { task.Delete(ctx, containerd.WithProcessKill) } if err := c.Delete(ctx, containerd.WithSnapshotCleanup); err != nil { if derr := c.Delete(ctx); derr == nil { continue } return err } } return nil }
return err } defer client.Close()
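The four lines above form one complete row of this dump: the file name (`main.go`), a prefix that breaks off inside the error check in `test(c config)`, a suffix that resumes at the `cleanup` call, and the held-out middle (`return err } defer client.Close()`). A minimal sketch of how such a row reassembles into source text, assuming the three fields concatenate directly and any boundary whitespace lives inside the stored fields; the dict literal is a hypothetical, abbreviated stand-in for however rows are actually loaded:

def reassemble(row: dict) -> str:
    # A fill-in-the-middle row reconstructs as prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

# Hypothetical, abbreviated stand-in for the main.go row shown above.
row = {
    "file_name": "main.go",
    "prefix": "\tclient, err := c.newClient()\n\tif err != nil {\n\t\t",
    "middle": "return err\n\t}\n\tdefer client.Close()\n",
    "suffix": "\tif err := cleanup(ctx, client); err != nil {\n\t\treturn err\n\t}\n",
}
source = reassemble(row)
assert "defer client.Close()" in source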
setup.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # !/usr/bin/env python import glob import os import torch from setuptools import find_packages from setuptools import setup from torch.utils.cpp_extension import CUDAExtension from torch.utils.cpp_extension import CUDA_HOME from torch.utils.cpp_extension import CppExtension requirements = ["torch", "torchvision"] def
(): this_dir = os.path.dirname(os.path.abspath(__file__)) extensions_dir = os.path.join(this_dir, "model", "csrc") main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp")) source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu")) sources = main_file + source_cpu extension = CppExtension extra_compile_args = {"cxx": []} define_macros = [] if torch.cuda.is_available() and CUDA_HOME is not None: extension = CUDAExtension sources += source_cuda define_macros += [("WITH_CUDA", None)] extra_compile_args["nvcc"] = [ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ] sources = [os.path.join(extensions_dir, s) for s in sources] include_dirs = [extensions_dir] ext_modules = [ extension( "model._C", sources, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args, ) ] return ext_modules setup( name="faster_rcnn", version="0.1", description="object detection in pytorch", packages=find_packages(exclude=("configs", "tests",)), # install_requires=requirements, ext_modules=get_extensions(), cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, )
get_extensions
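Here the held-out middle is just the function name `get_extensions`, completing the `def` the prefix breaks off at. The notable logic in the surrounding code is the build-backend choice: the `.cu` sources are compiled with `CUDAExtension` only when both a CUDA-capable torch and a CUDA toolkit (`CUDA_HOME`) are present, otherwise it falls back to a CPU-only `CppExtension`. A condensed, illustrative sketch of that branch, not a drop-in replacement for the row's function:

import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension

def pick_extension_backend():
    # Mirror of the condition in get_extensions above: CUDA builds need both
    # a CUDA-capable torch runtime and an installed toolkit (CUDA_HOME set).
    if torch.cuda.is_available() and CUDA_HOME is not None:
        return CUDAExtension  # the real function also adds .cu sources and WITH_CUDA
    return CppExtension       # CPU-only fallback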
test_dagrun.py
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import datetime from typing import Mapping, Optional from unittest import mock from unittest.mock import call import pendulum import pytest from sqlalchemy.orm.session import Session from airflow import settings from airflow.callbacks.callback_requests import DagCallbackRequest from airflow.decorators import task from airflow.models import DAG, DagBag, DagModel, DagRun, TaskInstance as TI, clear_task_instances from airflow.models.baseoperator import BaseOperator from airflow.models.taskmap import TaskMap from airflow.models.xcom_arg import XComArg from airflow.operators.empty import EmptyOperator from airflow.operators.python import ShortCircuitOperator from airflow.serialization.serialized_objects import SerializedDAG from airflow.stats import Stats from airflow.utils import timezone from airflow.utils.state import DagRunState, State, TaskInstanceState from airflow.utils.trigger_rule import TriggerRule from airflow.utils.types import DagRunType from tests.models import DEFAULT_DATE as _DEFAULT_DATE from tests.test_utils.db import clear_db_dags, clear_db_pools, clear_db_runs from tests.test_utils.mock_operators import MockOperator DEFAULT_DATE = pendulum.instance(_DEFAULT_DATE) class TestDagRun: dagbag = DagBag(include_examples=True) def setup_class(self) -> None: clear_db_runs() clear_db_pools() clear_db_dags() def teardown_method(self) -> None: clear_db_runs() clear_db_pools() clear_db_dags() def create_dag_run( self, dag: DAG, *, task_states: Optional[Mapping[str, TaskInstanceState]] = None, execution_date: Optional[datetime.datetime] = None, is_backfill: bool = False, session: Session, ): now = timezone.utcnow() if execution_date is None: execution_date = now execution_date = pendulum.instance(execution_date) if is_backfill: run_type = DagRunType.BACKFILL_JOB data_interval = dag.infer_automated_data_interval(execution_date) else: run_type = DagRunType.MANUAL data_interval = dag.timetable.infer_manual_data_interval(run_after=execution_date) dag_run = dag.create_dagrun( run_type=run_type, execution_date=execution_date, data_interval=data_interval, start_date=now, state=DagRunState.RUNNING, external_trigger=False, ) if task_states is not None: for task_id, task_state in task_states.items(): ti = dag_run.get_task_instance(task_id) ti.set_state(task_state, session) session.flush() return dag_run def test_clear_task_instances_for_backfill_dagrun(self, session): now = timezone.utcnow() dag_id = 'test_clear_task_instances_for_backfill_dagrun' dag = DAG(dag_id=dag_id, start_date=now) dag_run = self.create_dag_run(dag, execution_date=now, is_backfill=True, session=session) task0 = EmptyOperator(task_id='backfill_task_0', owner='test', dag=dag) ti0 = TI(task=task0, run_id=dag_run.run_id) ti0.run() qry = 
session.query(TI).filter(TI.dag_id == dag.dag_id).all() clear_task_instances(qry, session) session.commit() ti0.refresh_from_db() dr0 = session.query(DagRun).filter(DagRun.dag_id == dag_id, DagRun.execution_date == now).first() assert dr0.state == DagRunState.QUEUED def test_dagrun_find(self, session): now = timezone.utcnow() dag_id1 = "test_dagrun_find_externally_triggered" dag_run = DagRun( dag_id=dag_id1, run_id=dag_id1, run_type=DagRunType.MANUAL, execution_date=now, start_date=now, state=DagRunState.RUNNING, external_trigger=True, ) session.add(dag_run) dag_id2 = "test_dagrun_find_not_externally_triggered" dag_run = DagRun( dag_id=dag_id2, run_id=dag_id2, run_type=DagRunType.MANUAL, execution_date=now, start_date=now, state=DagRunState.RUNNING, external_trigger=False, ) session.add(dag_run) session.commit() assert 1 == len(DagRun.find(dag_id=dag_id1, external_trigger=True)) assert 1 == len(DagRun.find(run_id=dag_id1)) assert 2 == len(DagRun.find(run_id=[dag_id1, dag_id2])) assert 2 == len(DagRun.find(execution_date=[now, now])) assert 2 == len(DagRun.find(execution_date=now)) assert 0 == len(DagRun.find(dag_id=dag_id1, external_trigger=False)) assert 0 == len(DagRun.find(dag_id=dag_id2, external_trigger=True)) assert 1 == len(DagRun.find(dag_id=dag_id2, external_trigger=False)) def test_dagrun_find_duplicate(self, session): now = timezone.utcnow() dag_id = "test_dagrun_find_duplicate" dag_run = DagRun( dag_id=dag_id, run_id=dag_id, run_type=DagRunType.MANUAL, execution_date=now, start_date=now, state=DagRunState.RUNNING, external_trigger=True, ) session.add(dag_run) session.commit() assert DagRun.find_duplicate(dag_id=dag_id, run_id=dag_id, execution_date=now) is not None assert DagRun.find_duplicate(dag_id=dag_id, run_id=dag_id, execution_date=None) is not None assert DagRun.find_duplicate(dag_id=dag_id, run_id=None, execution_date=now) is not None assert DagRun.find_duplicate(dag_id=dag_id, run_id=None, execution_date=None) is None def test_dagrun_success_when_all_skipped(self, session): """ Tests that a DAG run succeeds when all tasks are skipped """ dag = DAG(dag_id='test_dagrun_success_when_all_skipped', start_date=timezone.datetime(2017, 1, 1)) dag_task1 = ShortCircuitOperator( task_id='test_short_circuit_false', dag=dag, python_callable=lambda: False ) dag_task2 = EmptyOperator(task_id='test_state_skipped1', dag=dag) dag_task3 = EmptyOperator(task_id='test_state_skipped2', dag=dag) dag_task1.set_downstream(dag_task2) dag_task2.set_downstream(dag_task3) initial_task_states = { 'test_short_circuit_false': TaskInstanceState.SUCCESS, 'test_state_skipped1': TaskInstanceState.SKIPPED, 'test_state_skipped2': TaskInstanceState.SKIPPED, } dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session) dag_run.update_state() assert DagRunState.SUCCESS == dag_run.state def test_dagrun_success_conditions(self, session): dag = DAG('test_dagrun_success_conditions', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) # A -> B # A -> C -> D # ordered: B, D, C, A or D, B, C, A or D, C, B, A with dag: op1 = EmptyOperator(task_id='A') op2 = EmptyOperator(task_id='B') op3 = EmptyOperator(task_id='C') op4 = EmptyOperator(task_id='D') op1.set_upstream([op2, op3]) op3.set_upstream(op4) dag.clear() now = pendulum.now("UTC") dr = dag.create_dagrun( run_id='test_dagrun_success_conditions', state=DagRunState.RUNNING, execution_date=now, data_interval=dag.timetable.infer_manual_data_interval(run_after=now), start_date=now, ) # op1 = root ti_op1 = 
dr.get_task_instance(task_id=op1.task_id) ti_op1.set_state(state=TaskInstanceState.SUCCESS, session=session) ti_op2 = dr.get_task_instance(task_id=op2.task_id) ti_op3 = dr.get_task_instance(task_id=op3.task_id) ti_op4 = dr.get_task_instance(task_id=op4.task_id) # root is successful, but unfinished tasks dr.update_state() assert DagRunState.RUNNING == dr.state # one has failed, but root is successful ti_op2.set_state(state=TaskInstanceState.FAILED, session=session) ti_op3.set_state(state=TaskInstanceState.SUCCESS, session=session) ti_op4.set_state(state=TaskInstanceState.SUCCESS, session=session) dr.update_state() assert DagRunState.SUCCESS == dr.state def test_dagrun_deadlock(self, session): dag = DAG('text_dagrun_deadlock', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) with dag: op1 = EmptyOperator(task_id='A') op2 = EmptyOperator(task_id='B') op2.trigger_rule = TriggerRule.ONE_FAILED op2.set_upstream(op1) dag.clear() now = pendulum.now("UTC") dr = dag.create_dagrun( run_id='test_dagrun_deadlock', state=DagRunState.RUNNING, execution_date=now, data_interval=dag.timetable.infer_manual_data_interval(run_after=now), start_date=now, ) ti_op1 = dr.get_task_instance(task_id=op1.task_id) ti_op1.set_state(state=TaskInstanceState.SUCCESS, session=session) ti_op2 = dr.get_task_instance(task_id=op2.task_id) ti_op2.set_state(state=None, session=session) dr.update_state() assert dr.state == DagRunState.RUNNING ti_op2.set_state(state=None, session=session) op2.trigger_rule = 'invalid' dr.update_state() assert dr.state == DagRunState.FAILED def test_dagrun_no_deadlock_with_shutdown(self, session): dag = DAG('test_dagrun_no_deadlock_with_shutdown', start_date=DEFAULT_DATE) with dag: op1 = EmptyOperator(task_id='upstream_task') op2 = EmptyOperator(task_id='downstream_task') op2.set_upstream(op1) dr = dag.create_dagrun( run_id='test_dagrun_no_deadlock_with_shutdown', state=DagRunState.RUNNING, execution_date=DEFAULT_DATE, data_interval=dag.timetable.infer_manual_data_interval(run_after=DEFAULT_DATE), start_date=DEFAULT_DATE, ) upstream_ti = dr.get_task_instance(task_id='upstream_task') upstream_ti.set_state(TaskInstanceState.SHUTDOWN, session=session) dr.update_state() assert dr.state == DagRunState.RUNNING def test_dagrun_no_deadlock_with_depends_on_past(self, session): dag = DAG('test_dagrun_no_deadlock', start_date=DEFAULT_DATE) with dag: EmptyOperator(task_id='dop', depends_on_past=True) EmptyOperator(task_id='tc', max_active_tis_per_dag=1) dag.clear() dr = dag.create_dagrun( run_id='test_dagrun_no_deadlock_1', state=DagRunState.RUNNING, execution_date=DEFAULT_DATE, data_interval=dag.timetable.infer_manual_data_interval(run_after=DEFAULT_DATE), start_date=DEFAULT_DATE, ) next_date = DEFAULT_DATE + datetime.timedelta(days=1) dr2 = dag.create_dagrun( run_id='test_dagrun_no_deadlock_2', state=DagRunState.RUNNING, execution_date=next_date, data_interval=dag.timetable.infer_manual_data_interval(run_after=next_date), start_date=next_date, ) ti1_op1 = dr.get_task_instance(task_id='dop') dr2.get_task_instance(task_id='dop') ti2_op1 = dr.get_task_instance(task_id='tc') dr.get_task_instance(task_id='tc') ti1_op1.set_state(state=TaskInstanceState.RUNNING, session=session) dr.update_state() dr2.update_state() assert dr.state == DagRunState.RUNNING assert dr2.state == DagRunState.RUNNING ti2_op1.set_state(state=TaskInstanceState.RUNNING, session=session) dr.update_state() dr2.update_state() assert dr.state == DagRunState.RUNNING assert dr2.state == DagRunState.RUNNING def 
test_dagrun_success_callback(self, session): def on_success_callable(context): assert context['dag_run'].dag_id == 'test_dagrun_success_callback' dag = DAG( dag_id='test_dagrun_success_callback', start_date=datetime.datetime(2017, 1, 1), on_success_callback=on_success_callable, ) dag_task1 = EmptyOperator(task_id='test_state_succeeded1', dag=dag) dag_task2 = EmptyOperator(task_id='test_state_succeeded2', dag=dag) dag_task1.set_downstream(dag_task2) initial_task_states = { 'test_state_succeeded1': TaskInstanceState.SUCCESS, 'test_state_succeeded2': TaskInstanceState.SUCCESS, } # Scheduler uses Serialized DAG -- so use that instead of the Actual DAG dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag)) dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session) _, callback = dag_run.update_state() assert DagRunState.SUCCESS == dag_run.state # Callbacks are not added until handle_callback = False is passed to dag_run.update_state() assert callback is None def test_dagrun_failure_callback(self, session): def on_failure_callable(context): assert context['dag_run'].dag_id == 'test_dagrun_failure_callback' dag = DAG( dag_id='test_dagrun_failure_callback', start_date=datetime.datetime(2017, 1, 1), on_failure_callback=on_failure_callable, ) dag_task1 = EmptyOperator(task_id='test_state_succeeded1', dag=dag) dag_task2 = EmptyOperator(task_id='test_state_failed2', dag=dag) initial_task_states = { 'test_state_succeeded1': TaskInstanceState.SUCCESS, 'test_state_failed2': TaskInstanceState.FAILED, } dag_task1.set_downstream(dag_task2) # Scheduler uses Serialized DAG -- so use that instead of the Actual DAG dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag)) dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session) _, callback = dag_run.update_state() assert DagRunState.FAILED == dag_run.state # Callbacks are not added until handle_callback = False is passed to dag_run.update_state() assert callback is None def test_dagrun_update_state_with_handle_callback_success(self, session): def on_success_callable(context): assert context['dag_run'].dag_id == 'test_dagrun_update_state_with_handle_callback_success' dag = DAG( dag_id='test_dagrun_update_state_with_handle_callback_success', start_date=datetime.datetime(2017, 1, 1), on_success_callback=on_success_callable, ) dag_task1 = EmptyOperator(task_id='test_state_succeeded1', dag=dag) dag_task2 = EmptyOperator(task_id='test_state_succeeded2', dag=dag) dag_task1.set_downstream(dag_task2) initial_task_states = { 'test_state_succeeded1': TaskInstanceState.SUCCESS, 'test_state_succeeded2': TaskInstanceState.SUCCESS, } # Scheduler uses Serialized DAG -- so use that instead of the Actual DAG dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag)) dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session) _, callback = dag_run.update_state(execute_callbacks=False) assert DagRunState.SUCCESS == dag_run.state # Callbacks are not added until handle_callback = False is passed to dag_run.update_state() assert callback == DagCallbackRequest( full_filepath=dag_run.dag.fileloc, dag_id="test_dagrun_update_state_with_handle_callback_success", run_id=dag_run.run_id, is_failure_callback=False, msg="success", ) def test_dagrun_update_state_with_handle_callback_failure(self, session): def on_failure_callable(context): assert context['dag_run'].dag_id == 'test_dagrun_update_state_with_handle_callback_failure' dag = DAG( dag_id='test_dagrun_update_state_with_handle_callback_failure', 
start_date=datetime.datetime(2017, 1, 1), on_failure_callback=on_failure_callable, ) dag_task1 = EmptyOperator(task_id='test_state_succeeded1', dag=dag) dag_task2 = EmptyOperator(task_id='test_state_failed2', dag=dag) dag_task1.set_downstream(dag_task2) initial_task_states = { 'test_state_succeeded1': TaskInstanceState.SUCCESS, 'test_state_failed2': TaskInstanceState.FAILED, } # Scheduler uses Serialized DAG -- so use that instead of the Actual DAG dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag)) dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session) _, callback = dag_run.update_state(execute_callbacks=False) assert DagRunState.FAILED == dag_run.state # Callbacks are not added until handle_callback = False is passed to dag_run.update_state() assert callback == DagCallbackRequest( full_filepath=dag_run.dag.fileloc, dag_id="test_dagrun_update_state_with_handle_callback_failure", run_id=dag_run.run_id, is_failure_callback=True, msg="task_failure", ) def test_dagrun_set_state_end_date(self, session): dag = DAG('test_dagrun_set_state_end_date', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) dag.clear() now = pendulum.now("UTC") dr = dag.create_dagrun( run_id='test_dagrun_set_state_end_date', state=DagRunState.RUNNING, execution_date=now, data_interval=dag.timetable.infer_manual_data_interval(now), start_date=now, ) # Initial end_date should be NULL # DagRunState.SUCCESS and DagRunState.FAILED are all ending state and should set end_date # DagRunState.RUNNING set end_date back to NULL session.add(dr) session.commit() assert dr.end_date is None dr.set_state(DagRunState.SUCCESS) session.merge(dr) session.commit() dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_set_state_end_date').one() assert dr_database.end_date is not None assert dr.end_date == dr_database.end_date dr.set_state(DagRunState.RUNNING) session.merge(dr) session.commit() dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_set_state_end_date').one() assert dr_database.end_date is None dr.set_state(DagRunState.FAILED) session.merge(dr) session.commit() dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_set_state_end_date').one() assert dr_database.end_date is not None assert dr.end_date == dr_database.end_date def test_dagrun_update_state_end_date(self, session): dag = DAG( 'test_dagrun_update_state_end_date', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'} ) # A -> B with dag: op1 = EmptyOperator(task_id='A') op2 = EmptyOperator(task_id='B') op1.set_upstream(op2) dag.clear() now = pendulum.now("UTC") dr = dag.create_dagrun( run_id='test_dagrun_update_state_end_date', state=DagRunState.RUNNING, execution_date=now, data_interval=dag.timetable.infer_manual_data_interval(now), start_date=now, ) # Initial end_date should be NULL # DagRunState.SUCCESS and DagRunState.FAILED are all ending state and should set end_date # DagRunState.RUNNING set end_date back to NULL session.merge(dr) session.commit() assert dr.end_date is None ti_op1 = dr.get_task_instance(task_id=op1.task_id) ti_op1.set_state(state=TaskInstanceState.SUCCESS, session=session) ti_op2 = dr.get_task_instance(task_id=op2.task_id) ti_op2.set_state(state=TaskInstanceState.SUCCESS, session=session) dr.update_state() dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_update_state_end_date').one() assert dr_database.end_date is not None assert dr.end_date == dr_database.end_date ti_op1.set_state(state=TaskInstanceState.RUNNING, 
session=session) ti_op2.set_state(state=TaskInstanceState.RUNNING, session=session) dr.update_state() dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_update_state_end_date').one() assert dr._state == DagRunState.RUNNING assert dr.end_date is None assert dr_database.end_date is None ti_op1.set_state(state=TaskInstanceState.FAILED, session=session) ti_op2.set_state(state=TaskInstanceState.FAILED, session=session) dr.update_state() dr_database = session.query(DagRun).filter(DagRun.run_id == 'test_dagrun_update_state_end_date').one() assert dr_database.end_date is not None assert dr.end_date == dr_database.end_date def test_get_task_instance_on_empty_dagrun(self, session): """ Make sure that a proper value is returned when a dagrun has no task instances """ dag = DAG(dag_id='test_get_task_instance_on_empty_dagrun', start_date=timezone.datetime(2017, 1, 1)) ShortCircuitOperator(task_id='test_short_circuit_false', dag=dag, python_callable=lambda: False) now = timezone.utcnow() # Don't use create_dagrun since it will create the task instances too which we # don't want dag_run = DagRun( dag_id=dag.dag_id, run_id="test_get_task_instance_on_empty_dagrun", run_type=DagRunType.MANUAL, execution_date=now, start_date=now, state=DagRunState.RUNNING, external_trigger=False, ) session.add(dag_run) session.commit() ti = dag_run.get_task_instance('test_short_circuit_false') assert ti is None def test_get_latest_runs(self, session): dag = DAG(dag_id='test_latest_runs_1', start_date=DEFAULT_DATE) self.create_dag_run(dag, execution_date=timezone.datetime(2015, 1, 1), session=session) self.create_dag_run(dag, execution_date=timezone.datetime(2015, 1, 2), session=session) dagruns = DagRun.get_latest_runs(session) session.close() for dagrun in dagruns: if dagrun.dag_id == 'test_latest_runs_1': assert dagrun.execution_date == timezone.datetime(2015, 1, 2) def test_removed_task_instances_can_be_restored(self, session): def with_all_tasks_removed(dag): return DAG(dag_id=dag.dag_id, start_date=dag.start_date) dag = DAG('test_task_restoration', start_date=DEFAULT_DATE) dag.add_task(EmptyOperator(task_id='flaky_task', owner='test')) dagrun = self.create_dag_run(dag, session=session) flaky_ti = dagrun.get_task_instances()[0] assert 'flaky_task' == flaky_ti.task_id assert flaky_ti.state is None dagrun.dag = with_all_tasks_removed(dag) dagrun.verify_integrity() flaky_ti.refresh_from_db() assert flaky_ti.state is None dagrun.dag.add_task(EmptyOperator(task_id='flaky_task', owner='test')) dagrun.verify_integrity() flaky_ti.refresh_from_db() assert flaky_ti.state is None def test_already_added_task_instances_can_be_ignored(self, session): dag = DAG('triggered_dag', start_date=DEFAULT_DATE) dag.add_task(EmptyOperator(task_id='first_task', owner='test')) dagrun = self.create_dag_run(dag, session=session) first_ti = dagrun.get_task_instances()[0] assert 'first_task' == first_ti.task_id assert first_ti.state is None # Lets assume that the above TI was added into DB by webserver, but if scheduler # is running the same method at the same time it would find 0 TIs for this dag # and proceeds further to create TIs. Hence mocking DagRun.get_task_instances # method to return an empty list of TIs. 
with mock.patch.object(DagRun, 'get_task_instances') as mock_gtis: mock_gtis.return_value = [] dagrun.verify_integrity() first_ti.refresh_from_db() assert first_ti.state is None @pytest.mark.parametrize("state", State.task_states) @mock.patch.object(settings, 'task_instance_mutation_hook', autospec=True) def test_task_instance_mutation_hook(self, mock_hook, session, state): def mutate_task_instance(task_instance): if task_instance.queue == 'queue1': task_instance.queue = 'queue2' else: task_instance.queue = 'queue1' mock_hook.side_effect = mutate_task_instance dag = DAG('test_task_instance_mutation_hook', start_date=DEFAULT_DATE) dag.add_task(EmptyOperator(task_id='task_to_mutate', owner='test', queue='queue1')) dagrun = self.create_dag_run(dag, session=session) task = dagrun.get_task_instances()[0] task.state = state session.merge(task) session.commit() assert task.queue == 'queue2' dagrun.verify_integrity() task = dagrun.get_task_instances()[0] assert task.queue == 'queue1' @pytest.mark.parametrize( "prev_ti_state, is_ti_success", [ (TaskInstanceState.SUCCESS, True), (TaskInstanceState.SKIPPED, True), (TaskInstanceState.RUNNING, False), (TaskInstanceState.FAILED, False), (None, False), ], ) def test_depends_on_past(self, session, prev_ti_state, is_ti_success): dag_id = 'test_depends_on_past' dag = self.dagbag.get_dag(dag_id) task = dag.tasks[0] dag_run_1 = self.create_dag_run( dag, execution_date=timezone.datetime(2016, 1, 1, 0, 0, 0), is_backfill=True, session=session, ) dag_run_2 = self.create_dag_run( dag, execution_date=timezone.datetime(2016, 1, 2, 0, 0, 0), is_backfill=True, session=session, ) prev_ti = TI(task, run_id=dag_run_1.run_id) ti = TI(task, run_id=dag_run_2.run_id) prev_ti.set_state(prev_ti_state) ti.set_state(TaskInstanceState.QUEUED) ti.run() assert (ti.state == TaskInstanceState.SUCCESS) == is_ti_success @pytest.mark.parametrize( "prev_ti_state, is_ti_success", [ (TaskInstanceState.SUCCESS, True), (TaskInstanceState.SKIPPED, True), (TaskInstanceState.RUNNING, False), (TaskInstanceState.FAILED, False), (None, False), ], ) def test_wait_for_downstream(self, session, prev_ti_state, is_ti_success): dag_id = 'test_wait_for_downstream' dag = self.dagbag.get_dag(dag_id) upstream, downstream = dag.tasks # For ti.set_state() to work, the DagRun has to exist, # Otherwise ti.previous_ti returns an unpersisted TI dag_run_1 = self.create_dag_run( dag, execution_date=timezone.datetime(2016, 1, 1, 0, 0, 0), is_backfill=True, session=session, ) dag_run_2 = self.create_dag_run( dag, execution_date=timezone.datetime(2016, 1, 2, 0, 0, 0), is_backfill=True, session=session, ) prev_ti_downstream = TI(task=downstream, run_id=dag_run_1.run_id) ti = TI(task=upstream, run_id=dag_run_2.run_id) prev_ti = ti.get_previous_ti() prev_ti.set_state(TaskInstanceState.SUCCESS) assert prev_ti.state == TaskInstanceState.SUCCESS prev_ti_downstream.set_state(prev_ti_state) ti.set_state(TaskInstanceState.QUEUED) ti.run() assert (ti.state == TaskInstanceState.SUCCESS) == is_ti_success @pytest.mark.parametrize("state", [DagRunState.QUEUED, DagRunState.RUNNING]) def test_next_dagruns_to_examine_only_unpaused(self, session, state): """ Check that "next_dagruns_to_examine" ignores runs from paused/inactive DAGs and gets running/queued dagruns """ dag = DAG(dag_id='test_dags', start_date=DEFAULT_DATE) EmptyOperator(task_id='dummy', dag=dag, owner='airflow') orm_dag = DagModel( dag_id=dag.dag_id, has_task_concurrency_limits=False, next_dagrun=DEFAULT_DATE, next_dagrun_create_after=DEFAULT_DATE + 
datetime.timedelta(days=1), is_active=True, ) session.add(orm_dag) session.flush() dr = dag.create_dagrun( run_type=DagRunType.SCHEDULED, state=state, execution_date=DEFAULT_DATE, data_interval=dag.infer_automated_data_interval(DEFAULT_DATE), start_date=DEFAULT_DATE if state == DagRunState.RUNNING else None, session=session, ) runs = DagRun.next_dagruns_to_examine(state, session).all() assert runs == [dr] orm_dag.is_paused = True session.flush() runs = DagRun.next_dagruns_to_examine(state, session).all() assert runs == [] @mock.patch.object(Stats, 'timing') def test_no_scheduling_delay_for_nonscheduled_runs(self, stats_mock, session): """ Tests that dag scheduling delay stat is not called if the dagrun is not a scheduled run. This case is manual run. Simple test for coherence check. """ dag = DAG(dag_id='test_dagrun_stats', start_date=DEFAULT_DATE) dag_task = EmptyOperator(task_id='dummy', dag=dag) initial_task_states = { dag_task.task_id: TaskInstanceState.SUCCESS, } dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session) dag_run.update_state() assert call(f'dagrun.{dag.dag_id}.first_task_scheduling_delay') not in stats_mock.mock_calls @pytest.mark.parametrize( "schedule_interval, expected", [ ("*/5 * * * *", True), (None, False), ("@once", False), ], ) def test_emit_scheduling_delay(self, session, schedule_interval, expected): """ Tests that dag scheduling delay stat is set properly once running scheduled dag. dag_run.update_state() invokes the _emit_true_scheduling_delay_stats_for_finished_state method. """ dag = DAG(dag_id='test_emit_dag_stats', start_date=DEFAULT_DATE, schedule_interval=schedule_interval) dag_task = EmptyOperator(task_id='dummy', dag=dag, owner='airflow') try: info = dag.next_dagrun_info(None) orm_dag_kwargs = {"dag_id": dag.dag_id, "has_task_concurrency_limits": False, "is_active": True} if info is not None: orm_dag_kwargs.update( { "next_dagrun": info.logical_date, "next_dagrun_data_interval": info.data_interval, "next_dagrun_create_after": info.run_after, }, ) orm_dag = DagModel(**orm_dag_kwargs) session.add(orm_dag) session.flush() dag_run = dag.create_dagrun( run_type=DagRunType.SCHEDULED, state=DagRunState.SUCCESS, execution_date=dag.start_date, data_interval=dag.infer_automated_data_interval(dag.start_date), start_date=dag.start_date, session=session, ) ti = dag_run.get_task_instance(dag_task.task_id, session) ti.set_state(TaskInstanceState.SUCCESS, session) session.flush() with mock.patch.object(Stats, 'timing') as stats_mock: dag_run.update_state(session) metric_name = f'dagrun.{dag.dag_id}.first_task_scheduling_delay' if expected: true_delay = ti.start_date - dag_run.data_interval_end sched_delay_stat_call = call(metric_name, true_delay) assert sched_delay_stat_call in stats_mock.mock_calls else: # Assert that we never passed the metric sched_delay_stat_call = call(metric_name, mock.ANY) assert sched_delay_stat_call not in stats_mock.mock_calls finally: # Don't write anything to the DB session.rollback() session.close() def test_states_sets(self, session): """ Tests that adding State.failed_states and State.success_states work as expected. 
""" dag = DAG(dag_id='test_dagrun_states', start_date=DEFAULT_DATE) dag_task_success = EmptyOperator(task_id='dummy', dag=dag) dag_task_failed = EmptyOperator(task_id='dummy2', dag=dag) initial_task_states = { dag_task_success.task_id: TaskInstanceState.SUCCESS, dag_task_failed.task_id: TaskInstanceState.FAILED, } dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session) ti_success = dag_run.get_task_instance(dag_task_success.task_id) ti_failed = dag_run.get_task_instance(dag_task_failed.task_id) assert ti_success.state in State.success_states assert ti_failed.state in State.failed_states @pytest.mark.parametrize( ('run_type', 'expected_tis'), [ pytest.param(DagRunType.MANUAL, 1, id='manual'), pytest.param(DagRunType.BACKFILL_JOB, 3, id='backfill'), ], ) @mock.patch.object(Stats, 'incr') def test_verify_integrity_task_start_and_end_date(Stats_incr, session, run_type, expected_tis): """Test that tasks with specific dates are only created for backfill runs""" with DAG('test', start_date=DEFAULT_DATE) as dag: EmptyOperator(task_id='without') EmptyOperator(task_id='with_start_date', start_date=DEFAULT_DATE + datetime.timedelta(1)) EmptyOperator(task_id='with_end_date', end_date=DEFAULT_DATE - datetime.timedelta(1)) dag_run = DagRun( dag_id=dag.dag_id, run_type=run_type, execution_date=DEFAULT_DATE, run_id=DagRun.generate_run_id(run_type, DEFAULT_DATE), ) dag_run.dag = dag session.add(dag_run) session.flush() dag_run.verify_integrity(session) tis = dag_run.task_instances assert len(tis) == expected_tis Stats_incr.assert_called_with('task_instance_created-EmptyOperator', expected_tis) @pytest.mark.parametrize('is_noop', [True, False]) def test_expand_mapped_task_instance_at_create(is_noop, dag_maker, session): with mock.patch('airflow.settings.task_instance_mutation_hook') as mock_mut: mock_mut.is_noop = is_noop literal = [1, 2, 3, 4] with dag_maker(session=session, dag_id='test_dag'): mapped = MockOperator.partial(task_id='task_2').expand(arg2=literal) dr = dag_maker.create_dagrun() indices = ( session.query(TI.map_index) .filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id) .order_by(TI.map_index) .all() ) assert indices == [(0,), (1,), (2,), (3,)] @pytest.mark.need_serialized_dag @pytest.mark.parametrize('is_noop', [True, False]) def
(is_noop, dag_maker, session): with mock.patch('airflow.settings.task_instance_mutation_hook') as mock_mut: mock_mut.is_noop = is_noop @task def mynameis(arg): print(arg) literal = [1, 2, 3, 4] with dag_maker(session=session, dag_id='test_dag'): mynameis.expand(arg=literal) dr = dag_maker.create_dagrun() indices = ( session.query(TI.map_index) .filter_by(task_id='mynameis', dag_id=dr.dag_id, run_id=dr.run_id) .order_by(TI.map_index) .all() ) assert indices == [(0,), (1,), (2,), (3,)] def test_mapped_literal_verify_integrity(dag_maker, session): """Test that when the length of a mapped literal changes we remove extra TIs""" with dag_maker(session=session) as dag: @task def task_2(arg2): ... task_2.expand(arg2=[1, 2, 3, 4]) dr = dag_maker.create_dagrun() # Now "change" the DAG and we should see verify_integrity REMOVE some TIs dag._remove_task('task_2') with dag: mapped = task_2.expand(arg2=[1, 2]).operator # At this point, we need to test that the change works on the serialized # DAG (which is what the scheduler operates on) serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag)) dr.dag = serialized_dag dr.verify_integrity() indices = ( session.query(TI.map_index, TI.state) .filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id) .order_by(TI.map_index) .all() ) assert indices == [(0, None), (1, None), (2, TaskInstanceState.REMOVED), (3, TaskInstanceState.REMOVED)] def test_mapped_literal_to_xcom_arg_verify_integrity(dag_maker, session): """Test that when we change from literal to a XComArg the TIs are removed""" with dag_maker(session=session) as dag: t1 = BaseOperator(task_id='task_1') @task def task_2(arg2): ... task_2.expand(arg2=[1, 2, 3, 4]) dr = dag_maker.create_dagrun() # Now "change" the DAG and we should see verify_integrity REMOVE some TIs dag._remove_task('task_2') with dag: mapped = task_2.expand(arg2=XComArg(t1)).operator # At this point, we need to test that the change works on the serialized # DAG (which is what the scheduler operates on) serialized_dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag)) dr.dag = serialized_dag dr.verify_integrity() indices = ( session.query(TI.map_index, TI.state) .filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id) .order_by(TI.map_index) .all() ) assert indices == [ (0, TaskInstanceState.REMOVED), (1, TaskInstanceState.REMOVED), (2, TaskInstanceState.REMOVED), (3, TaskInstanceState.REMOVED), ] @pytest.mark.need_serialized_dag def test_mapped_mixed__literal_not_expanded_at_create(dag_maker, session): literal = [1, 2, 3, 4] with dag_maker(session=session): task = BaseOperator(task_id='task_1') mapped = MockOperator.partial(task_id='task_2').expand(arg1=literal, arg2=XComArg(task)) dr = dag_maker.create_dagrun() query = ( session.query(TI.map_index, TI.state) .filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id) .order_by(TI.map_index) ) assert query.all() == [(-1, None)] # Verify_integrity shouldn't change the result now that the TIs exist dr.verify_integrity(session=session) assert query.all() == [(-1, None)] def test_ti_scheduling_mapped_zero_length(dag_maker, session): with dag_maker(session=session): task = BaseOperator(task_id='task_1') mapped = MockOperator.partial(task_id='task_2').expand(arg2=XComArg(task)) dr: DagRun = dag_maker.create_dagrun() ti1, ti2 = sorted(dr.task_instances, key=lambda ti: ti.task_id) ti1.state = TaskInstanceState.SUCCESS session.add( TaskMap(dag_id=dr.dag_id, task_id=ti1.task_id, run_id=dr.run_id, map_index=-1, length=0, keys=None) ) 
session.flush() decision = dr.task_instance_scheduling_decisions(session=session) # ti1 finished execution. ti2 goes directly to finished state because it's # expanded against a zero-length XCom. assert decision.finished_tis == [ti1, ti2] indices = ( session.query(TI.map_index, TI.state) .filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id) .order_by(TI.map_index) .all() ) assert indices == [(-1, TaskInstanceState.SKIPPED)] def test_mapped_task_upstream_failed(dag_maker, session): from airflow.operators.python import PythonOperator with dag_maker(session=session) as dag: @dag.task def make_list(): return list(map(lambda a: f'echo "{a!r}"', [1, 2, {'a': 'b'}])) def consumer(*args): print(repr(args)) PythonOperator.partial(task_id='consumer', python_callable=consumer).expand(op_args=make_list()) dr = dag_maker.create_dagrun() _, make_list_ti = sorted(dr.task_instances, key=lambda ti: ti.task_id) make_list_ti.state = TaskInstanceState.FAILED session.flush() tis, _ = dr.update_state(execute_callbacks=False, session=session) assert tis == [] tis = sorted(dr.task_instances, key=lambda ti: ti.task_id) assert sorted((ti.task_id, ti.map_index, ti.state) for ti in tis) == [ ("consumer", -1, TaskInstanceState.UPSTREAM_FAILED), ("make_list", -1, TaskInstanceState.FAILED), ] # Bug/possible source of optimization: The DR isn't marked as failed until # in the loop that marks the last task as UPSTREAM_FAILED tis, _ = dr.update_state(execute_callbacks=False, session=session) assert tis == [] assert dr.state == DagRunState.FAILED def test_mapped_task_all_finish_before_downstream(dag_maker, session): result = None with dag_maker(session=session) as dag: @dag.task def make_list(): return [1, 2] @dag.task def double(value): return value * 2 @dag.task def consumer(value): nonlocal result result = list(value) consumer(value=double.expand(value=make_list())) dr: DagRun = dag_maker.create_dagrun() def _task_ids(tis): return [ti.task_id for ti in tis] # The first task is always make_list. decision = dr.task_instance_scheduling_decisions(session=session) assert _task_ids(decision.schedulable_tis) == ["make_list"] # After make_list is run, double is expanded. decision.schedulable_tis[0].run(verbose=False, session=session) decision = dr.task_instance_scheduling_decisions(session=session) assert _task_ids(decision.schedulable_tis) == ["double", "double"] # Running just one of the mapped tis does not make downstream schedulable. decision.schedulable_tis[0].run(verbose=False, session=session) decision = dr.task_instance_scheduling_decisions(session=session) assert _task_ids(decision.schedulable_tis) == ["double"] # Downstream is schedulable after all mapped tis are run. decision.schedulable_tis[0].run(verbose=False, session=session) decision = dr.task_instance_scheduling_decisions(session=session) assert _task_ids(decision.schedulable_tis) == ["consumer"] # We should be able to get all values aggregated from mapped upstreams. 
decision.schedulable_tis[0].run(verbose=False, session=session) decision = dr.task_instance_scheduling_decisions(session=session) assert decision.schedulable_tis == [] assert result == [2, 4] def test_schedule_tis_map_index(dag_maker, session): with dag_maker(session=session, dag_id="test"): task = BaseOperator(task_id='task_1') dr = DagRun(dag_id="test", run_id="test", run_type=DagRunType.MANUAL) ti0 = TI(task=task, run_id=dr.run_id, map_index=0, state=TaskInstanceState.SUCCESS) ti1 = TI(task=task, run_id=dr.run_id, map_index=1, state=None) ti2 = TI(task=task, run_id=dr.run_id, map_index=2, state=TaskInstanceState.SUCCESS) session.add_all((dr, ti0, ti1, ti2)) session.flush() assert dr.schedule_tis((ti1,), session=session) == 1 session.refresh(ti0) session.refresh(ti1) session.refresh(ti2) assert ti0.state == TaskInstanceState.SUCCESS assert ti1.state == TaskInstanceState.SCHEDULED assert ti2.state == TaskInstanceState.SUCCESS
test_expand_mapped_task_instance_task_decorator
index.ts
'use strict'; import Vue from 'vue'; import App from './App.vue' import EmbedVideoPlayer from './embeds/html5'; import EmbedTwitchPlayer from './embeds/twitch'; import EmbedYouTubePlayer from './embeds/youtube'; import VODPlayer from './vodplayer'; // main hooks document.addEventListener("DOMContentLoaded", () => { const vodplayer = new VODPlayer(); const app = new Vue({ render: h => h(App), data: { vp: vodplayer } }).$mount('#app'); vodplayer.elements.player = document.getElementById('player'); vodplayer.elements.video = document.getElementById('video'); vodplayer.elements.comments = document.getElementById('comments'); vodplayer.elements.osd = document.getElementById('osd'); // vodplayer.elements.timeline = document.getElementById('timeline-text'); vodplayer.elements.playback_text = document.getElementById('playback_text'); vodplayer.hooks(); console.debug("vodplayer", vodplayer); let processHash = () => { console.debug("Process hash", window.location.hash); let query = window.location.hash; let query_param = query.split("&"); let params: any = {}; for (let param of query_param) { params[param.split("=")[0].replace("#", "")] = param.split("=")[1]; } // twitch client id if(params.tci){ vodplayer.settings.twitchClientId = params.tci; } // twitch secret if(params.ts){ vodplayer.settings.twitchSecret = params.ts; } // token if(params.tk){ vodplayer.settings.twitchToken = params.tk; } if(params.offset){ vodplayer.chatOffset = parseInt(params.offset); } if(params.chapters){ vodplayer.videoChapters = []; let ch = params.chapters.split(";"); for( let c of ch ){ let d = c.split(":"); let chapter = { time: parseInt(d[0]), label: d[1] }; console.log("add chapter", chapter); vodplayer.videoChapters.push(chapter); } console.log(vodplayer.videoChapters); } // automate it if (params.source) { console.debug("automate playback"); vodplayer.automated = true; // load video if (params.source == "youtube") { (<any>window).onYouTubeIframeAPIReady = () => { vodplayer.embedPlayer = new EmbedYouTubePlayer(params.youtube_id); } } else if (params.source == "twitch") { vodplayer.embedPlayer = new EmbedTwitchPlayer(params.twitch_id);
vodplayer.embedPlayer = new EmbedVideoPlayer(params.video_path); }else{ alert("No video source set"); return false; } // set up embed player if (vodplayer.embedPlayer) { vodplayer.embedPlayer.vodplayer = vodplayer; vodplayer.embedPlayer.setup(); } // load chat if (params.chatdump && vodplayer.embedPlayer) { vodplayer.loadTwitchChat(params.chatdump); } else if (params.chatfile && vodplayer.embedPlayer) { vodplayer.embedPlayer.setCallback('ready', () => { console.debug("player ready, load chat file"); vodplayer.loadChatFileFromURL(params.chatfile); }); }else{ alert("No chat source set"); return false; } } } window.addEventListener("hashchange", () => processHash); processHash(); });
} else if (params.source == "file") {
inspector.rs
//! checks for attributes use clippy_utils::get_attr; use rustc_ast::ast::{Attribute, InlineAsmTemplatePiece}; use rustc_hir as hir; use rustc_lint::{LateContext, LateLintPass, LintContext}; use rustc_session::Session; use rustc_session::{declare_lint_pass, declare_tool_lint}; declare_clippy_lint! { /// ### What it does /// Dumps every ast/hir node which has the `#[clippy::dump]` /// attribute /// /// ### Example /// ```rust,ignore /// #[clippy::dump] /// extern crate foo; /// ``` /// /// prints /// /// ```text /// item `foo` /// visibility inherited from outer item /// extern crate dylib source: "/path/to/foo.so" /// ``` pub DEEP_CODE_INSPECTION, internal_warn, "helper to dump info about code" } declare_lint_pass!(DeepCodeInspector => [DEEP_CODE_INSPECTION]); impl<'tcx> LateLintPass<'tcx> for DeepCodeInspector { fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) { if !has_attr(cx.sess(), cx.tcx.hir().attrs(item.hir_id())) { return; } print_item(cx, item); } fn check_impl_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::ImplItem<'_>) { if !has_attr(cx.sess(), cx.tcx.hir().attrs(item.hir_id())) { return; } println!("impl item `{}`", item.ident.name); match item.vis.node { hir::VisibilityKind::Public => println!("public"), hir::VisibilityKind::Crate(_) => println!("visible crate wide"), hir::VisibilityKind::Restricted { path, .. } => println!( "visible in module `{}`", rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| s.print_path(path, false)) ), hir::VisibilityKind::Inherited => println!("visibility inherited from outer item"), } if item.defaultness.is_default() { println!("default"); } match item.kind { hir::ImplItemKind::Const(_, body_id) => { println!("associated constant"); print_expr(cx, &cx.tcx.hir().body(body_id).value, 1); }, hir::ImplItemKind::Fn(..) 
=> println!("method"), hir::ImplItemKind::TyAlias(_) => println!("associated type"), } } // fn check_trait_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx // hir::TraitItem) { // if !has_attr(&item.attrs) { // return; // } // } // // fn check_variant(&mut self, cx: &LateContext<'tcx>, var: &'tcx // hir::Variant, _: // &hir::Generics) { // if !has_attr(&var.node.attrs) { // return; // } // } // // fn check_field_def(&mut self, cx: &LateContext<'tcx>, field: &'tcx // hir::FieldDef) { // if !has_attr(&field.attrs) { // return; // } // } // fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'_>) { if !has_attr(cx.sess(), cx.tcx.hir().attrs(expr.hir_id)) { return; } print_expr(cx, expr, 0); } fn check_arm(&mut self, cx: &LateContext<'tcx>, arm: &'tcx hir::Arm<'_>) { if !has_attr(cx.sess(), cx.tcx.hir().attrs(arm.hir_id)) { return; } print_pat(cx, arm.pat, 1); if let Some(ref guard) = arm.guard { println!("guard:"); print_guard(cx, guard, 1); } println!("body:"); print_expr(cx, arm.body, 1); } fn check_stmt(&mut self, cx: &LateContext<'tcx>, stmt: &'tcx hir::Stmt<'_>) { if !has_attr(cx.sess(), cx.tcx.hir().attrs(stmt.hir_id)) { return; } match stmt.kind { hir::StmtKind::Local(local) => { println!("local variable of type {}", cx.typeck_results().node_type(local.hir_id)); println!("pattern:"); print_pat(cx, local.pat, 0); if let Some(e) = local.init { println!("init expression:"); print_expr(cx, e, 0); } }, hir::StmtKind::Item(_) => println!("item decl"), hir::StmtKind::Expr(e) | hir::StmtKind::Semi(e) => print_expr(cx, e, 0), } } // fn check_foreign_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx // hir::ForeignItem) { // if !has_attr(&item.attrs) { // return; // } // } // } fn has_attr(sess: &Session, attrs: &[Attribute]) -> bool { get_attr(sess, attrs, "dump").count() > 0 } #[allow(clippy::similar_names)] #[allow(clippy::too_many_lines)] fn print_expr(cx: &LateContext<'_>, expr: &hir::Expr<'_>, indent: usize) { let ind = " ".repeat(indent); println!("{}+", ind); println!("{}ty: {}", ind, cx.typeck_results().expr_ty(expr)); println!( "{}adjustments: {:?}", ind, cx.typeck_results().adjustments().get(expr.hir_id) ); match expr.kind { hir::ExprKind::Box(e) => { println!("{}Box", ind); print_expr(cx, e, indent + 1); }, hir::ExprKind::Array(v) => { println!("{}Array", ind); for e in v { print_expr(cx, e, indent + 1); } }, hir::ExprKind::Call(func, args) => { println!("{}Call", ind); println!("{}function:", ind); print_expr(cx, func, indent + 1); println!("{}arguments:", ind); for arg in args { print_expr(cx, arg, indent + 1); } }, hir::ExprKind::MethodCall(path, _, args, _) => { println!("{}MethodCall", ind); println!("{}method name: {}", ind, path.ident.name); for arg in args { print_expr(cx, arg, indent + 1); } }, hir::ExprKind::Tup(v) => { println!("{}Tup", ind); for e in v { print_expr(cx, e, indent + 1); } }, hir::ExprKind::Binary(op, lhs, rhs) => { println!("{}Binary", ind); println!("{}op: {:?}", ind, op.node); println!("{}lhs:", ind); print_expr(cx, lhs, indent + 1); println!("{}rhs:", ind); print_expr(cx, rhs, indent + 1); }, hir::ExprKind::Unary(op, inner) => { println!("{}Unary", ind); println!("{}op: {:?}", ind, op); print_expr(cx, inner, indent + 1); }, hir::ExprKind::Lit(ref lit) => { println!("{}Lit", ind); println!("{}{:?}", ind, lit); }, hir::ExprKind::Cast(e, target) => { println!("{}Cast", ind); print_expr(cx, e, indent + 1); println!("{}target type: {:?}", ind, target); }, hir::ExprKind::Type(e, target) => { println!("{}Type", ind); print_expr(cx, e, 
indent + 1); println!("{}target type: {:?}", ind, target); }, hir::ExprKind::Loop(..) => { println!("{}Loop", ind); }, hir::ExprKind::If(cond, _, ref else_opt) => { println!("{}If", ind); println!("{}condition:", ind); print_expr(cx, cond, indent + 1); if let Some(els) = *else_opt { println!("{}else:", ind); print_expr(cx, els, indent + 1); } }, hir::ExprKind::Match(cond, _, ref source) => { println!("{}Match", ind); println!("{}condition:", ind); print_expr(cx, cond, indent + 1); println!("{}source: {:?}", ind, source); }, hir::ExprKind::Closure(ref clause, _, _, _, _) => { println!("{}Closure", ind); println!("{}clause: {:?}", ind, clause); }, hir::ExprKind::Yield(sub, _) => { println!("{}Yield", ind); print_expr(cx, sub, indent + 1); }, hir::ExprKind::Block(_, _) => { println!("{}Block", ind); }, hir::ExprKind::Assign(lhs, rhs, _) => { println!("{}Assign", ind); println!("{}lhs:", ind); print_expr(cx, lhs, indent + 1); println!("{}rhs:", ind); print_expr(cx, rhs, indent + 1); }, hir::ExprKind::AssignOp(ref binop, lhs, rhs) => { println!("{}AssignOp", ind); println!("{}op: {:?}", ind, binop.node); println!("{}lhs:", ind); print_expr(cx, lhs, indent + 1); println!("{}rhs:", ind); print_expr(cx, rhs, indent + 1); }, hir::ExprKind::Field(e, ident) => { println!("{}Field", ind); println!("{}field name: {}", ind, ident.name); println!("{}struct expr:", ind); print_expr(cx, e, indent + 1); }, hir::ExprKind::Index(arr, idx) => { println!("{}Index", ind); println!("{}array expr:", ind); print_expr(cx, arr, indent + 1); println!("{}index expr:", ind); print_expr(cx, idx, indent + 1); }, hir::ExprKind::Path(hir::QPath::Resolved(ref ty, path)) => { println!("{}Resolved Path, {:?}", ind, ty); println!("{}path: {:?}", ind, path); }, hir::ExprKind::Path(hir::QPath::TypeRelative(ty, seg)) => { println!("{}Relative Path, {:?}", ind, ty); println!("{}seg: {:?}", ind, seg); }, hir::ExprKind::Path(hir::QPath::LangItem(lang_item, ..)) => { println!("{}Lang Item Path, {:?}", ind, lang_item.name()); }, hir::ExprKind::AddrOf(kind, ref muta, e) => { println!("{}AddrOf", ind); println!("kind: {:?}", kind); println!("mutability: {:?}", muta); print_expr(cx, e, indent + 1); }, hir::ExprKind::Break(_, ref e) => { println!("{}Break", ind); if let Some(e) = *e { print_expr(cx, e, indent + 1); } }, hir::ExprKind::Continue(_) => println!("{}Again", ind), hir::ExprKind::Ret(ref e) => { println!("{}Ret", ind); if let Some(e) = *e { print_expr(cx, e, indent + 1); } }, hir::ExprKind::InlineAsm(asm) => { println!("{}InlineAsm", ind); println!("{}template: {}", ind, InlineAsmTemplatePiece::to_string(asm.template)); println!("{}options: {:?}", ind, asm.options); println!("{}operands:", ind); for (op, _op_sp) in asm.operands { match op { hir::InlineAsmOperand::In { expr, .. } | hir::InlineAsmOperand::InOut { expr, .. } | hir::InlineAsmOperand::Sym { expr } => print_expr(cx, expr, indent + 1), hir::InlineAsmOperand::Out { expr, .. } => { if let Some(expr) = expr { print_expr(cx, expr, indent + 1); } }, hir::InlineAsmOperand::SplitInOut { in_expr, out_expr, .. 
} => { print_expr(cx, in_expr, indent + 1); if let Some(out_expr) = out_expr { print_expr(cx, out_expr, indent + 1); } }, hir::InlineAsmOperand::Const { anon_const } => { println!("{}anon_const:", ind); print_expr(cx, &cx.tcx.hir().body(anon_const.body).value, indent + 1); }, } } }, hir::ExprKind::LlvmInlineAsm(asm) => { let inputs = &asm.inputs_exprs; let outputs = &asm.outputs_exprs; println!("{}LlvmInlineAsm", ind); println!("{}inputs:", ind); for e in inputs.iter() { print_expr(cx, e, indent + 1); } println!("{}outputs:", ind); for e in outputs.iter() { print_expr(cx, e, indent + 1); } }, hir::ExprKind::Struct(path, fields, ref base) => { println!("{}Struct", ind); println!("{}path: {:?}", ind, path); for field in fields { println!("{}field \"{}\":", ind, field.ident.name); print_expr(cx, field.expr, indent + 1); } if let Some(base) = *base { println!("{}base:", ind); print_expr(cx, base, indent + 1); } }, hir::ExprKind::ConstBlock(ref anon_const) => { println!("{}ConstBlock", ind); println!("{}anon_const:", ind); print_expr(cx, &cx.tcx.hir().body(anon_const.body).value, indent + 1); }, hir::ExprKind::Repeat(val, ref anon_const) => { println!("{}Repeat", ind); println!("{}value:", ind); print_expr(cx, val, indent + 1); println!("{}repeat count:", ind); print_expr(cx, &cx.tcx.hir().body(anon_const.body).value, indent + 1); }, hir::ExprKind::Err => { println!("{}Err", ind); }, hir::ExprKind::DropTemps(e) => { println!("{}DropTemps", ind); print_expr(cx, e, indent + 1); }, } } fn print_item(cx: &LateContext<'_>, item: &hir::Item<'_>) { let did = item.def_id; println!("item `{}`", item.ident.name); match item.vis.node { hir::VisibilityKind::Public => println!("public"), hir::VisibilityKind::Crate(_) => println!("visible crate wide"), hir::VisibilityKind::Restricted { path, .. } => println!( "visible in module `{}`", rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| s.print_path(path, false)) ), hir::VisibilityKind::Inherited => println!("visibility inherited from outer item"), } match item.kind { hir::ItemKind::ExternCrate(ref _renamed_from) => { if let Some(crate_id) = cx.tcx.extern_mod_stmt_cnum(did) { let source = cx.tcx.used_crate_source(crate_id); if let Some(ref src) = source.dylib { println!("extern crate dylib source: {:?}", src.0); } if let Some(ref src) = source.rlib { println!("extern crate rlib source: {:?}", src.0); } } else { println!("weird extern crate without a crate id"); } }, hir::ItemKind::Use(path, ref kind) => println!("{:?}, {:?}", path, kind), hir::ItemKind::Static(..) => println!("static item of type {:#?}", cx.tcx.type_of(did)), hir::ItemKind::Const(..) => println!("const item of type {:#?}", cx.tcx.type_of(did)), hir::ItemKind::Fn(..) => { let item_ty = cx.tcx.type_of(did); println!("function of type {:#?}", item_ty); }, hir::ItemKind::Mod(..) => println!("module"), hir::ItemKind::ForeignMod { abi, .. } => println!("foreign module with abi: {}", abi), hir::ItemKind::GlobalAsm(asm) => println!("global asm: {:?}", asm), hir::ItemKind::TyAlias(..) => { println!("type alias for {:?}", cx.tcx.type_of(did)); }, hir::ItemKind::OpaqueTy(..) => { println!("existential type with real type {:?}", cx.tcx.type_of(did)); }, hir::ItemKind::Enum(..) => { println!("enum definition of type {:?}", cx.tcx.type_of(did)); }, hir::ItemKind::Struct(..) => { println!("struct definition of type {:?}", cx.tcx.type_of(did)); }, hir::ItemKind::Union(..) => { println!("union definition of type {:?}", cx.tcx.type_of(did)); }, hir::ItemKind::Trait(..) 
=> { println!("trait decl"); if cx.tcx.trait_is_auto(did.to_def_id()) { println!("trait is auto"); } else { println!("trait is not auto"); } }, hir::ItemKind::TraitAlias(..) => { println!("trait alias"); }, hir::ItemKind::Impl(hir::Impl { of_trait: Some(ref _trait_ref), .. }) => { println!("trait impl"); }, hir::ItemKind::Impl(hir::Impl { of_trait: None, .. }) => { println!("impl"); }, } } #[allow(clippy::similar_names)] #[allow(clippy::too_many_lines)] fn print_pat(cx: &LateContext<'_>, pat: &hir::Pat<'_>, indent: usize) { let ind = " ".repeat(indent); println!("{}+", ind); match pat.kind { hir::PatKind::Wild => println!("{}Wild", ind), hir::PatKind::Binding(ref mode, .., ident, ref inner) => { println!("{}Binding", ind); println!("{}mode: {:?}", ind, mode); println!("{}name: {}", ind, ident.name);
}, hir::PatKind::Or(fields) => { println!("{}Or", ind); for field in fields { print_pat(cx, field, indent + 1); } }, hir::PatKind::Struct(ref path, fields, ignore) => { println!("{}Struct", ind); println!( "{}name: {}", ind, rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| s.print_qpath(path, false)) ); println!("{}ignore leftover fields: {}", ind, ignore); println!("{}fields:", ind); for field in fields { println!("{} field name: {}", ind, field.ident.name); if field.is_shorthand { println!("{} in shorthand notation", ind); } print_pat(cx, field.pat, indent + 1); } }, hir::PatKind::TupleStruct(ref path, fields, opt_dots_position) => { println!("{}TupleStruct", ind); println!( "{}path: {}", ind, rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| s.print_qpath(path, false)) ); if let Some(dot_position) = opt_dots_position { println!("{}dot position: {}", ind, dot_position); } for field in fields { print_pat(cx, field, indent + 1); } }, hir::PatKind::Path(hir::QPath::Resolved(ref ty, path)) => { println!("{}Resolved Path, {:?}", ind, ty); println!("{}path: {:?}", ind, path); }, hir::PatKind::Path(hir::QPath::TypeRelative(ty, seg)) => { println!("{}Relative Path, {:?}", ind, ty); println!("{}seg: {:?}", ind, seg); }, hir::PatKind::Path(hir::QPath::LangItem(lang_item, ..)) => { println!("{}Lang Item Path, {:?}", ind, lang_item.name()); }, hir::PatKind::Tuple(pats, opt_dots_position) => { println!("{}Tuple", ind); if let Some(dot_position) = opt_dots_position { println!("{}dot position: {}", ind, dot_position); } for field in pats { print_pat(cx, field, indent + 1); } }, hir::PatKind::Box(inner) => { println!("{}Box", ind); print_pat(cx, inner, indent + 1); }, hir::PatKind::Ref(inner, ref muta) => { println!("{}Ref", ind); println!("{}mutability: {:?}", ind, muta); print_pat(cx, inner, indent + 1); }, hir::PatKind::Lit(e) => { println!("{}Lit", ind); print_expr(cx, e, indent + 1); }, hir::PatKind::Range(ref l, ref r, ref range_end) => { println!("{}Range", ind); if let Some(expr) = l { print_expr(cx, expr, indent + 1); } if let Some(expr) = r { print_expr(cx, expr, indent + 1); } match *range_end { hir::RangeEnd::Included => println!("{} end included", ind), hir::RangeEnd::Excluded => println!("{} end excluded", ind), } }, hir::PatKind::Slice(first_pats, ref range, last_pats) => { println!("{}Slice [a, b, ..i, y, z]", ind); println!("[a, b]:"); for pat in first_pats { print_pat(cx, pat, indent + 1); } println!("i:"); if let Some(pat) = *range { print_pat(cx, pat, indent + 1); } println!("[y, z]:"); for pat in last_pats { print_pat(cx, pat, indent + 1); } }, } } fn print_guard(cx: &LateContext<'_>, guard: &hir::Guard<'_>, indent: usize) { let ind = " ".repeat(indent); println!("{}+", ind); match guard { hir::Guard::If(expr) => { println!("{}If", ind); print_expr(cx, expr, indent + 1); }, hir::Guard::IfLet(pat, expr) => { println!("{}IfLet", ind); print_pat(cx, pat, indent + 1); print_expr(cx, expr, indent + 1); }, } }
if let Some(inner) = *inner { println!("{}inner:", ind); print_pat(cx, inner, indent + 1); }
index.js
import { combineReducers } from 'redux'; import { routerReducer } from 'react-router-redux'; import app from './app'; import note from './note'; import reminders from './reminders'; import todo from './todo';
reminders, router: routerReducer, todo, });
export default combineReducers({ app, note,
const.py
# ========================================================================================= # Copyright 2015 Community Information Online Consortium (CIOC) and KCL Software Solutions # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ========================================================================================= # std lib import os # jQuery and jQueryUI versions JQUERY_VERSION = "1.6.2" JQUERY_UI_VERSION = "1.8.16" # formatting constants DATE_TEXT_SIZE = 25 TEXT_SIZE = 85 TEXTAREA_COLS = 85 TEXTAREA_ROWS_SHORT = 2 TEXTAREA_ROWS_LONG = 4 TEXTAREA_ROWS_XLONG = 10 MAX_LENGTH_CHECKLIST_NOTES = 255 EMAIL_LENGTH = 60 # application running constants _app_path = None _config_file = None _app_name = None session_lock_dir = None publish_dir = None def
(): # called from application init at startup global _app_path, _config_file, _app_name, session_lock_dir, publish_dir if _app_path is None: _app_path = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..', '..')) _app_name = os.path.split(_app_path)[1] _config_file = os.path.join(_app_path, '..', '..', 'config', _app_name + '.ini') session_lock_dir = os.path.join(_app_path, 'python', 'session_lock') publish_dir = os.path.join(_app_path, 'python', 'published_files') try: os.makedirs(session_lock_dir) except os.error: pass try: os.makedirs(publish_dir) except os.error: pass
update_cache_values
strbytes.go
/* * Copyright (c) 2018 wellwell.work, LLC by Zoe * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * */ package convert import ( "reflect" "unsafe" ) // Bytes2String converts byte slice to a string without memory allocation. // See https://groups.google.com/forum/#!msg/Golang-Nuts/ENgbUzYvCuU/90yGx7GUAgAJ . // // Note it may break if string and/or slice header will change // in the future go versions. func Bytes2String(bs []byte) string { return *(*string)(unsafe.Pointer(&bs)) } // String2Bytes converts string to a byte slice without memory allocation. // // Note it may break if string and/or slice header will change // in the future go versions. func String2Bytes(str string) []byte
{ sh := (*reflect.StringHeader)(unsafe.Pointer(&str)) bh := reflect.SliceHeader{ Data: sh.Data, Len: sh.Len, Cap: sh.Len, } return *(*[]byte)(unsafe.Pointer(&bh)) }
spelling.py
"""Inconsistent spelling. --- layout: post source: Intelligent Editing Ltd. source_url: http://bit.ly/1x3hYj7 title: Inconsistent spelling date: 2014-06-10 12:31:19 categories: writing --- Intelligent Editing Ltd. says: > Some words have more than one correct spelling. American, British, Australian and Canadian English all have their own preferences. Even within those, there can be multiple spellings. For example, in the UK 'realise' is often preferred. However, 'realize' has been used in British-English for centuries and is preferred in the Oxford English Dictionary. However, no matter which spelling is preferred, one thing is always wrong: you mustn't use two different spellings in the same document. """ from proselint.tools import consistency_check, memoize @memoize def check(text): """Check the text.""" err = "consistency.spelling" msg = "Inconsistent spelling of '{}' (vs. '{}')." word_pairs = [ ["advisor", "adviser"], # ["analyse", "analyze"], ["centre", "center"], ["colour", "color"], ["emphasise", "emphasize"],
["learnt", "learned"], ["organise", "organize"], ["organised", "organized"], ["organising", "organizing"], ["recognise", "recognize"], ] return consistency_check(text, word_pairs, err, msg)
["finalise", "finalize"], ["focussed", "focused"], ["labour", "labor"],
cryptotest.py
import ccxt class crypto(object):
c = crypto() c.get_watchlist() print(c.watchlist) c.query_watchlist("lastPrice") c.list_attributes()
watchlist = {"BTC":"", "ETH":"", "EOS":"", "ADA":"", "IOTA":"", "NULS":"", "NEO":"", "SKY":"", "PAL":""} def __init__(self): self.build_market() def list_exchanges(self): n = [print(x) for x in ccxt.exchanges] def build_market(self): self.binance = ccxt.binance() self.coinbase = ccxt.coinbase() self.kucoin = ccxt.kucoin() def get_watchlist(self): for i in self.watchlist: try: self.watchlist[i] = self.binance.fetch_ticker(i+"/BTC") except: pass def query_watchlist(self, attribute): for i in self.watchlist: try: print(self.watchlist[i]["symbol"], self.watchlist[i]['info'][attribute]) except TypeError: print(i) def list_attributes(self): for i in self.watchlist["ETH"]: print(i) print() for i in self.watchlist["ETH"]["info"]: print(i)
s3.go
// Copyright 2018 mikan. package main import ( "bytes" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" ) func
(name string, data []byte) { bucket := "chart.aosn.ws" service := s3.New(session.Must(session.NewSession(&aws.Config{ Region: aws.String(endpoints.ApNortheast1RegionID), }))) var expires = time.Now().Add(time.Hour * 24 * 6) // 6 days later _, err := service.PutObject(&s3.PutObjectInput{ Bucket: aws.String(bucket), Key: aws.String(name), Body: bytes.NewReader(data), ContentType: aws.String("application/json"), ACL: aws.String(s3.BucketCannedACLPublicRead), Expires: &expires, }) if err != nil { panic(err) } }
Upload
auth.js
import getProp from 'dotprop' import Storage from './storage' import { routeOption, isRelativeURL, isSet, isSameURL } from './utilities' export default class Auth { constructor (ctx, options) { this.ctx = ctx this.options = options // for (var key in this.options.redirect) { // this.options.redirect[key] = '/' + ctx.app.i18n.locale + this.options.redirect[key] // }
// for (var key in this.options.redirect) { // this.options.redirect[key] = this.options.redirect[key].replace('/' + old, '/' + locale) // } // } // Strategies this.strategies = {} // Error listeners this._errorListeners = [] // Storage & State options.initialState = { user: null, loggedIn: false } const storage = new Storage(ctx, options) this.$storage = storage this.$state = storage.state } async init () { // Reset on error if (this.options.resetOnError) { this.onError((...args) => { if (typeof (this.options.resetOnError) !== 'function' || this.options.resetOnError(...args)) { this.reset() } }) } // Restore strategy this.$storage.syncUniversal('strategy', this.options.defaultStrategy) // Set default strategy if current one is invalid if (!this.strategy) { this.$storage.setUniversal('strategy', this.options.defaultStrategy) // Give up if still invalid if (!this.strategy) { return Promise.resolve() } } try { // Call mounted for active strategy on initial load await this.mounted() } catch (error) { this.callOnError(error) } finally { // Watch for loggedIn changes only in client side if (process.client && this.options.watchLoggedIn) { this.$storage.watchState('loggedIn', loggedIn => { if (!routeOption(this.ctx.route, 'auth', false)) { this.redirect(loggedIn ? 'home' : 'logout') } }) } } } // Backward compatibility get state () { if (!this._state_warn_shown) { this._state_warn_shown = true // eslint-disable-next-line no-console console.warn('[AUTH] $auth.state is deprecated. Please use $auth.$state or top level props like $auth.loggedIn') } return this.$state } getState (key) { if (!this._get_state_warn_shown) { this._get_state_warn_shown = true // eslint-disable-next-line no-console console.warn('[AUTH] $auth.getState is deprecated. Please use $auth.$storage.getState() or top level props like $auth.loggedIn') } return this.$storage.getState(key) } // --------------------------------------------------------------- // Strategy and Scheme // --------------------------------------------------------------- get strategy () { return this.strategies[this.$state.strategy] } registerStrategy (name, strategy) { this.strategies[name] = strategy } setStrategy (name) { if (name === this.$storage.getUniversal('strategy')) { return Promise.resolve() } // Set strategy this.$storage.setUniversal('strategy', name) // Call mounted hook on active strategy return this.mounted() } mounted () { if (!this.strategy.mounted) { return this.fetchUserOnce() } return Promise.resolve(this.strategy.mounted(...arguments)).catch(error => { this.callOnError(error, { method: 'mounted' }) return Promise.reject(error) }) } loginWith (name, ...args) { return this.setStrategy(name).then(() => this.login(...args)) } login () { if (!this.strategy.login) { return Promise.resolve() } return this.wrapLogin(this.strategy.login(...arguments)).catch(error => { this.callOnError(error, { method: 'login' }) return Promise.reject(error) }) } fetchUser () { if (!this.strategy.fetchUser) { return Promise.resolve() } return Promise.resolve(this.strategy.fetchUser(...arguments)).catch(error => { this.callOnError(error, { method: 'fetchUser' }) return Promise.reject(error) }) } logout () { if (!this.strategy.logout) { this.reset() return Promise.resolve() } return Promise.resolve(this.strategy.logout(...arguments)).catch(error => { this.callOnError(error, { method: 'logout' }) return Promise.reject(error) }) } setUserToken (token) { if (!this.strategy.setUserToken) { this.setToken(this.strategy.name, token) return Promise.resolve() } return 
Promise.resolve(this.strategy.setUserToken(token)).catch(error => { this.callOnError(error, { method: 'setUserToken' }) return Promise.reject(error) }) } reset () { if (!this.strategy.reset) { this.setUser(false) this.setToken(this.$state.strategy, false) this.setRefreshToken(this.$state.strategy, false) return Promise.resolve() } return Promise.resolve(this.strategy.reset(...arguments)).catch(error => { this.callOnError(error, { method: 'reset' }) return Promise.reject(error) }) } // --------------------------------------------------------------- // Token helpers // --------------------------------------------------------------- getToken (strategy) { const _key = this.options.token.prefix + strategy return this.$storage.getUniversal(_key) } setToken (strategy, token) { const _key = this.options.token.prefix + strategy return this.$storage.setUniversal(_key, token) } syncToken (strategy) { const _key = this.options.token.prefix + strategy return this.$storage.syncUniversal(_key) } // --------------------------------------------------------------- // Refresh token helpers // --------------------------------------------------------------- getRefreshToken (strategy) { const _key = this.options.refresh_token.prefix + strategy return this.$storage.getUniversal(_key) } setRefreshToken (strategy, refreshToken) { const _key = this.options.refresh_token.prefix + strategy return this.$storage.setUniversal(_key, refreshToken) } syncRefreshToken (strategy) { const _key = this.options.refresh_token.prefix + strategy return this.$storage.syncUniversal(_key) } // --------------------------------------------------------------- // User helpers // --------------------------------------------------------------- get user () { return this.$state.user } get loggedIn () { return this.$state.loggedIn } fetchUserOnce () { if (!this.$state.user) { return this.fetchUser(...arguments) } return Promise.resolve() } setUser (user) { this.$storage.setState('loggedIn', Boolean(user)) this.$storage.setState('user', user) } // --------------------------------------------------------------- // Utils // --------------------------------------------------------------- get busy () { return this.$storage.getState('busy') } request (endpoint, defaults) { const _endpoint = typeof defaults === 'object' ? 
Object.assign({}, defaults, endpoint) : endpoint return this.ctx.app.$axios .request(_endpoint) .then(response => { if (_endpoint.propertyName) { return getProp(response.data, _endpoint.propertyName) } else { return response.data } }) .catch(error => { // Call all error handlers this.callOnError(error, { method: 'request' }) // Throw error return Promise.reject(error) }) } requestWith (strategy, endpoint, defaults) { const token = this.getToken(strategy) const _endpoint = Object.assign({}, defaults, endpoint) if (!_endpoint.headers) { _endpoint.headers = {} } if (!_endpoint.headers['Authorization'] && isSet(token) && token) { _endpoint.headers['Authorization'] = token } return this.request(_endpoint) } wrapLogin (promise) { this.$storage.setState('busy', true) this.error = null return Promise.resolve(promise) .then(() => { this.$storage.setState('busy', false) }) .catch(error => { this.$storage.setState('busy', false) return Promise.reject(error) }) } onError (listener) { this._errorListeners.push(listener) } callOnError (error, payload = {}) { this.error = error for (let fn of this._errorListeners) { fn(error, payload) } } redirect (name, noRouter = false) { if (!this.options.redirect) { return } const from = this.options.fullPathRedirect ? this.ctx.route.fullPath : this.ctx.route.path var to = this.options.redirect[name] if (this.options.i18n) { var path = this.options.redirect[name] if (path === '/') { path = 'index' } else { path = path.replace(/^\/|\/$/g, '').replace(/\//g, '-') } to = this.ctx.app.router.app.localePath(path) } if (!to) { return } // Apply rewrites if (this.options.rewriteRedirects) { if (name === 'login' && isRelativeURL(from) && !isSameURL(to, from)) { this.$storage.setUniversal('redirect', from) } if (name === 'home') { const redirect = this.$storage.getUniversal('redirect') this.$storage.setUniversal('redirect', null) if (isRelativeURL(redirect)) { to = redirect } } } // Prevent infinity redirects if (isSameURL(to, from)) { return } if (process.browser) { if (noRouter) { window.location.replace(to) } else { this.ctx.redirect(to) } } else { this.ctx.redirect(to) } } hasScope (scope) { const userScopes = this.$state.user && getProp(this.$state.user, this.options.scopeKey) if (!userScopes) { return undefined } if (Array.isArray(userScopes)) { return userScopes.includes(scope) } return Boolean(getProp(userScopes, scope)) } }
// ctx.app.i18n.onLanguageSwitched = (old, locale) => { // console.log(this.ctx.app.router.app.localePath) // console.log(this.ctx.app.router.app.localePath('home', locale))
hosttarget.rs
use crate::message::commands::IRCMessageParseExt; use crate::message::{IRCMessage, ServerMessageParseError}; use itertools::Itertools; use std::convert::TryFrom; use std::str::FromStr; /// When a channel starts or stops hosting another channel. #[derive(Debug, Clone, PartialEq)] pub struct HostTargetMessage { /// Login name of the channel that just started or ended host mode. pub channel_login: String, /// The type of action that was taken in the channel, either host mode was enabled (entered) /// or disabled (exited). pub action: HostTargetAction, /// The message that this `HostTargetMessage` was parsed from. pub source: IRCMessage, } /// The type of action that a `HOSTTARGET` signifies, either host mode was enabled (entered) /// or disabled (exited). #[derive(Debug, Clone, PartialEq)] pub enum HostTargetAction { /// Host mode was enabled (entered). HostModeOn { /// Login name of the channel that is now being hosted. hosted_channel_login: String, /// Optional: number of viewers watching the host. If missing this number is /// unknown at this moment. viewer_count: Option<u64>, }, /// Host mode was disabled (exited). HostModeOff { /// Optional: number of viewers watching the host. If missing this number is /// unknown at this moment. viewer_count: Option<u64>, }, } impl TryFrom<IRCMessage> for HostTargetMessage { type Error = ServerMessageParseError; fn
(source: IRCMessage) -> Result<HostTargetMessage, ServerMessageParseError> { if source.command != "HOSTTARGET" { return Err(ServerMessageParseError::MismatchedCommand(source)); } // examples: // host on: :tmi.twitch.tv HOSTTARGET #randers :leebaxd 0 // host on: :tmi.twitch.tv HOSTTARGET #randers :leebaxd - // host off: :tmi.twitch.tv HOSTTARGET #randers :- 0 // hosttarget_parameter is that glued-together parameter at the end, e.g. "leebaxd 0". // we then split it. let hosttarget_parameter = source.try_get_param(1)?; let (hosted_channel_raw, viewer_count_raw) = hosttarget_parameter .splitn(2, ' ') .next_tuple() .ok_or_else(|| ServerMessageParseError::MalformedParameter(source.to_owned(), 1))?; let viewer_count = match viewer_count_raw { "-" => None, viewer_count => Some(u64::from_str(viewer_count).map_err(|_| { ServerMessageParseError::MalformedParameter(source.to_owned(), 2) })?), }; let action = match hosted_channel_raw { "-" => HostTargetAction::HostModeOff { viewer_count }, hosted_channel_login => HostTargetAction::HostModeOn { hosted_channel_login: hosted_channel_login.to_owned(), viewer_count, }, }; Ok(HostTargetMessage { channel_login: source.try_get_channel_login()?.to_owned(), action, source, }) } } impl From<HostTargetMessage> for IRCMessage { fn from(msg: HostTargetMessage) -> IRCMessage { msg.source } } #[cfg(test)] mod tests { use crate::message::commands::hosttarget::HostTargetAction; use crate::message::{HostTargetMessage, IRCMessage}; use std::convert::TryFrom; #[test] fn test_fresh_host_on() { let src = ":tmi.twitch.tv HOSTTARGET #randers :leebaxd 0"; let irc_message = IRCMessage::parse(src).unwrap(); let msg = HostTargetMessage::try_from(irc_message.clone()).unwrap(); assert_eq!( msg, HostTargetMessage { channel_login: "randers".to_owned(), action: HostTargetAction::HostModeOn { hosted_channel_login: "leebaxd".to_owned(), viewer_count: Some(0) }, source: irc_message } ); } #[test] fn test_stale_host_on() { let src = ":tmi.twitch.tv HOSTTARGET #randers :leebaxd -"; let irc_message = IRCMessage::parse(src).unwrap(); let msg = HostTargetMessage::try_from(irc_message.clone()).unwrap(); assert_eq!( msg, HostTargetMessage { channel_login: "randers".to_owned(), action: HostTargetAction::HostModeOn { hosted_channel_login: "leebaxd".to_owned(), viewer_count: None }, source: irc_message } ); } #[test] fn test_host_off() { let src = ":tmi.twitch.tv HOSTTARGET #randers :- 0"; let irc_message = IRCMessage::parse(src).unwrap(); let msg = HostTargetMessage::try_from(irc_message.clone()).unwrap(); assert_eq!( msg, HostTargetMessage { channel_login: "randers".to_owned(), action: HostTargetAction::HostModeOff { viewer_count: Some(0) }, source: irc_message } ); } #[test] fn test_host_off_no_viewer_count() { let src = ":tmi.twitch.tv HOSTTARGET #randers :- -"; let irc_message = IRCMessage::parse(src).unwrap(); let msg = HostTargetMessage::try_from(irc_message.clone()).unwrap(); assert_eq!( msg, HostTargetMessage { channel_login: "randers".to_owned(), action: HostTargetAction::HostModeOff { viewer_count: None }, source: irc_message } ); } }
try_from
HtmlWebpackPlugin.js
var HtmlWebpackPlugin = require('html-webpack-plugin'); module.exports = function (config, options) { config = config || {}; if (!options.isDev) { config.minify = { minimize: true, removeComments: true, collapseWhitespace: true, minifyCSS: false, minifyJS: true, removeScriptTypeAttributes: true, removeStyleTypeAttributes: true }; } config.links = [ // { // rel: 'preconnect', // href: 'https://api.hostmon.ru' // }, { rel: 'preconnect', href: 'https://fonts.googleapis.com' }, { rel: 'stylesheet', href: 'https://fonts.googleapis.com/css?family=Roboto:300,400,500', }, { rel: 'stylesheet', href: 'https://fonts.googleapis.com/icon?family=Material+Icons',
config.meta = [ { 'http-equiv': 'Content-Security-Policy', content: options.isDev ? "default-src 'self' 'unsafe-inline' 'unsafe-eval' data: *" : "default-src 'self';" + "connect-src 'self';" + "script-src 'self' 'unsafe-inline';" + "img-src 'self' data:;" + "style-src 'self' 'unsafe-inline';" + // "font-src 'self' data:;" + "object-src 'none';" + "child-src 'none';" + // deprecated "frame-src 'none';" + "form-action 'self';" + "upgrade-insecure-requests;" + "block-all-mixed-content;" + "base-uri " + config.baseHref || '/' }, { 'http-equiv': 'X-XSS-Protection', content: '1;mode=block' }, { 'http-equiv': 'Strict-Transport-Security', content: 'max-age=31536000; includeSubDomains; preload' }, { 'http-equiv': 'X-Content-Type-Options', content: 'nosniff' }, { name: 'viewport', content: 'minimum-scale=1, initial-scale=1, width=device-width, shrink-to-fit=no' }, { name: 'description', content: 'Chat' }, { name: 'google', content: 'notranslate' }, { name: 'theme-color', content: '#9cf1fa' }, { name: 'mobile-web-app-capable', content: 'yes' } ]; return new HtmlWebpackPlugin(config); };
}, ];
monitor_test.go
// Copyright 2018 Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package monitor_test import ( "errors" "testing" "time" "github.com/onsi/gomega" networking "istio.io/api/networking/v1alpha3"
) const checkInterval = 100 * time.Millisecond var createConfigSet = []*model.Config{ { ConfigMeta: model.ConfigMeta{ Name: "magic", Type: "gateway", }, Spec: &networking.Gateway{ Servers: []*networking.Server{ { Port: &networking.Port{ Number: 80, Protocol: "HTTP", }, Hosts: []string{"*.example.com"}, }, }, }, }, } var updateConfigSet = []*model.Config{ { ConfigMeta: model.ConfigMeta{ Name: "magic", Type: "gateway", }, Spec: &networking.Gateway{ Servers: []*networking.Server{ { Port: &networking.Port{ Number: 80, Protocol: "HTTPS", }, Hosts: []string{"*.example.com"}, }, }, }, }, } func TestMonitorForChange(t *testing.T) { g := gomega.NewGomegaWithT(t) configDescriptor := model.ConfigDescriptor{model.Gateway} store := memory.Make(configDescriptor) var ( callCount int configs []*model.Config err error ) someConfigFunc := func() ([]*model.Config, error) { switch callCount { case 0: configs = createConfigSet err = nil case 3: configs = updateConfigSet case 8: configs = []*model.Config{} } callCount++ return configs, err } mon := monitor.NewMonitor(store, checkInterval, someConfigFunc) stop := make(chan struct{}) defer func() { stop <- struct{}{} }() // shut it down mon.Start(stop) g.Eventually(func() error { c, err := store.List("gateway", "") g.Expect(err).NotTo(gomega.HaveOccurred()) if len(c) != 1 { return errors.New("no configs") } if c[0].ConfigMeta.Name != "magic" { return errors.New("wrong config") } return nil }).Should(gomega.Succeed()) g.Eventually(func() error { c, err := store.List("gateway", "") g.Expect(err).NotTo(gomega.HaveOccurred()) gateway := c[0].Spec.(*networking.Gateway) if gateway.Servers[0].Port.Protocol != "HTTPS" { return errors.New("Protocol has not been updated") } return nil }).Should(gomega.Succeed()) g.Eventually(func() ([]model.Config, error) { return store.List("gateway", "") }).Should(gomega.HaveLen(0)) } func TestMonitorForError(t *testing.T) { g := gomega.NewGomegaWithT(t) configDescriptor := model.ConfigDescriptor{model.Gateway} store := memory.Make(configDescriptor) var ( callCount int configs []*model.Config err error ) delay := make(chan struct{}, 1) someConfigFunc := func() ([]*model.Config, error) { switch callCount { case 0: configs = createConfigSet err = nil case 3: configs = nil err = errors.New("SnapshotFunc can't connect!!") delay <- struct{}{} } callCount++ return configs, err } mon := monitor.NewMonitor(store, checkInterval, someConfigFunc) stop := make(chan struct{}) defer func() { stop <- struct{}{} }() // shut it down mon.Start(stop) //Test ensures that after a coplilot connection error the data remains //nil data return and error return keeps the existing data aka createConfigSet <-delay g.Eventually(func() error { c, err := store.List("gateway", "") g.Expect(err).NotTo(gomega.HaveOccurred()) if len(c) != 1 { return errors.New("Config files erased on Copilot error") } return nil }).Should(gomega.Succeed()) }
"istio.io/istio/pilot/pkg/config/memory" "istio.io/istio/pilot/pkg/config/monitor" "istio.io/istio/pilot/pkg/model"
kendo.culture.ml-IN.min.js
/** * Kendo UI v2018.3.1017 (http://www.telerik.com/kendo-ui) * Copyright 2018 Telerik EAD. All rights reserved. * * Kendo UI commercial licenses may be obtained at * http://www.telerik.com/purchase/license-agreement/kendo-ui-complete * If you do not own a commercial license, this file shall be governed by the trial license terms.
*/ !function(y){"function"==typeof define&&define.amd?define(["kendo.core.min"],y):y()}(function(){!function(y,M){kendo.cultures["ml-IN"]={name:"ml-IN",numberFormat:{pattern:["-n"],decimals:2,",":",",".":".",groupSize:[3,2],percent:{pattern:["-n%","n%"],decimals:2,",":",",".":".",groupSize:[3,2],symbol:"%"},currency:{name:"Indian Rupee",abbr:"INR",pattern:["-$n","$n"],decimals:2,",":",",".":".",groupSize:[3],symbol:"₹"}},calendars:{standard:{days:{names:["ഞായറാഴ്‌ച","തിങ്കളാഴ്‌ച","ചൊവ്വാഴ്ച","ബുധനാഴ്‌ച","വ്യാഴാഴ്‌ച","വെള്ളിയാഴ്‌ച","ശനിയാഴ്‌ച"],namesAbbr:["ഞായർ","തിങ്കൾ","ചൊവ്വ","ബുധൻ","വ്യാഴം","വെള്ളി","ശനി"],namesShort:["ഞാ","തി","ചൊ","ബു","വ്യാ","വെ","ശ"]},months:{names:["ജനുവരി","ഫെബ്രുവരി","മാർച്ച്","ഏപ്രിൽ","മേയ്","ജൂൺ","ജൂലൈ","ഓഗസ്റ്റ്","സെപ്റ്റംബർ","ഒക്‌ടോബർ","നവംബർ","ഡിസംബർ"],namesAbbr:["ജനു","ഫെബ്രു","മാർ","ഏപ്രി","മേയ്","ജൂൺ","ജൂലൈ","ഓഗ","സെപ്റ്റം","ഒക്ടോ","നവം","ഡിസം"]},AM:["AM","am","AM"],PM:["PM","pm","PM"],patterns:{d:"d/M/yyyy",D:"yyyy, MMMM d, dddd",F:"yyyy, MMMM d, dddd h:mm:ss tt",g:"d/M/yyyy h:mm tt",G:"d/M/yyyy h:mm:ss tt",m:"MMMM d",M:"MMMM d",s:"yyyy'-'MM'-'dd'T'HH':'mm':'ss",t:"h:mm tt",T:"h:mm:ss tt",u:"yyyy'-'MM'-'dd HH':'mm':'ss'Z'",y:"yyyy MMMM",Y:"yyyy MMMM"},"/":"/",":":":",firstDay:0}}}}(this)}); //# sourceMappingURL=kendo.culture.ml-IN.min.js.map
PluginHandlers.py
from os import path from shutil import make_archive import os from json import load, dump PLUGINEXTENSION = '.epf' DESCRIPTIONNAME = 'description' def packPluginFromFolder(folderPath): folderPath = path.abspath(folderPath) if not path.exists(folderPath): raise FileNotFoundError('the folder does not exist.') if not path.isdir(folderPath): raise NotADirectoryError('folderPath must be a directory with files.') parentFolder = path.abspath(path.join(folderPath, path.pardir)) descriptionPath = path.abspath(path.join(folderPath, DESCRIPTIONNAME + '.json')) if not path.exists(descriptionPath): raise FileNotFoundError('required plugin description file not found.') zipTitle = folderPath finalName = zipTitle + PLUGINEXTENSION make_archive(zipTitle, 'gztar', folderPath, './') os.rename(zipTitle + '.tar.gz', finalName) class PluginDescription(object): def
(self, name='', description='', authorName='', authorEmail=''): self.name = name self.description = description self.authorName = authorName self.authorEmail = authorEmail def __repr__(self): return self.name def _toDict(self): d = dir(self) dd = {v: getattr(self, v) for v in d if not v.startswith('_') and not callable(getattr(self, v))} return dd def saveToDisk(self, destFolder): try: finalPath = path.abspath(path.join(destFolder, DESCRIPTIONNAME + '.json')) with open(finalPath, 'w') as dest: dump(self._toDict(), dest, indent=4) except: raise @staticmethod def fromDisk(folderPath): descriptionPath = path.abspath(path.join(folderPath, DESCRIPTIONNAME + '.json')) if not path.exists(descriptionPath): raise FileNotFoundError('required plugin description file not found.') with open(descriptionPath) as desc: data = load(desc) description = PluginDescription(**data) return description class _Plugin(object): def __init__(self, description, mainClass, pluginPath): self.description = description self.mainClass = mainClass self.pluginPath = pluginPath
__init__
less11_5.py
# Implement a Hero class that must have the following attributes: name, health, rank, strength, and a hit method.
# The hit method must deal damage to the opponent equal to the hero's strength. The hero must have the following
# constraints: health from 0 to 100, rank 1, 2 or 3. Strength is no more than 10% of the hero's current health.
# Heroes whose health is below 5 cannot be hit.
#
# Implement a Mage class that can restore the health of other heroes; it also has a rank like a hero and
# can deal hits. It charges money for restoring health. (You need to implement this functionality.)
# A hero earns money for winning a fight against another hero; on victory he also takes all of the opponent's money.
# How much money the hero receives for a victory and how much it costs to restore health is up to you.
hello.py
from dataclasses import dataclass, field from typing import Optional __NAMESPACE__ = "http://hello/" @dataclass class HelloByeError: class Meta: namespace = "http://hello/" message: Optional[str] = field( default=None, metadata={ "type": "Element", "namespace": "", } ) @dataclass class HelloError: class Meta: namespace = "http://hello/" message: Optional[str] = field( default=None, metadata={ "type": "Element", "namespace": "", } ) @dataclass class GetHelloAsString: class Meta: name = "getHelloAsString" namespace = "http://hello/" arg0: Optional[str] = field( default=None, metadata={ "type": "Element", "namespace": "", } ) @dataclass class GetHelloAsStringResponse: class Meta: name = "getHelloAsStringResponse" namespace = "http://hello/" return_value: Optional[str] = field( default=None, metadata={ "name": "return", "type": "Element", "namespace": "", } ) @dataclass class HelloGetHelloAsStringInput: class Meta: name = "Envelope" namespace = "http://schemas.xmlsoap.org/soap/envelope/" body: Optional["HelloGetHelloAsStringInput.Body"] = field( default=None, metadata={ "name": "Body", "type": "Element", } ) @dataclass class Body: get_hello_as_string: Optional[GetHelloAsString] = field( default=None, metadata={ "name": "getHelloAsString", "type": "Element", "namespace": "http://hello/", } ) @dataclass class HelloGetHelloAsStringOutput: class Meta: name = "Envelope" namespace = "http://schemas.xmlsoap.org/soap/envelope/" body: Optional["HelloGetHelloAsStringOutput.Body"] = field( default=None, metadata={ "name": "Body", "type": "Element", } ) @dataclass class Body: get_hello_as_string_response: Optional[GetHelloAsStringResponse] = field( default=None, metadata={ "name": "getHelloAsStringResponse", "type": "Element", "namespace": "http://hello/", } ) fault: Optional["HelloGetHelloAsStringOutput.Body.Fault"] = field( default=None, metadata={ "name": "Fault", "type": "Element", } ) @dataclass class Fault: faultcode: Optional[str] = field(
default=None, metadata={ "type": "Element", "namespace": "", } ) faultstring: Optional[str] = field( default=None, metadata={ "type": "Element", "namespace": "", } ) faultactor: Optional[str] = field( default=None, metadata={ "type": "Element", "namespace": "", } ) detail: Optional["HelloGetHelloAsStringOutput.Body.Fault.Detail"] = field( default=None, metadata={ "type": "Element", "namespace": "", } ) @dataclass class Detail: hello_error: Optional[HelloError] = field( default=None, metadata={ "name": "HelloError", "type": "Element", "namespace": "http://hello/", } ) hello_bye_error: Optional[HelloByeError] = field( default=None, metadata={ "name": "HelloByeError", "type": "Element", "namespace": "http://hello/", } ) class HelloGetHelloAsString: style = "rpc" location = "http://localhost:9999/ws/hello" transport = "http://schemas.xmlsoap.org/soap/http" input = HelloGetHelloAsStringInput output = HelloGetHelloAsStringOutput
configurator.js
import React from 'react'; import { RocketIcon } from '@modulz/radix-icons'; import { ThemeIcon } from '@mantine/core'; var __defProp = Object.defineProperty; var __getOwnPropSymbols = Object.getOwnPropertySymbols; var __hasOwnProp = Object.prototype.hasOwnProperty; var __propIsEnum = Object.prototype.propertyIsEnumerable; var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value; var __spreadValues = (a, b) => { for (var prop in b || (b = {})) if (__hasOwnProp.call(b, prop)) __defNormalProp(a, prop, b[prop]); if (__getOwnPropSymbols) for (var prop of __getOwnPropSymbols(b)) { if (__propIsEnum.call(b, prop)) __defNormalProp(a, prop, b[prop]); } return a; }; const iconSizes = { xs: 10, sm: 12, md: 16, lg: 20, xl: 24 }; function
(props) { return /* @__PURE__ */ React.createElement("div", { style: { display: "flex", alignItems: "center", justifyContent: "center" } }, /* @__PURE__ */ React.createElement(ThemeIcon, __spreadValues({}, props), /* @__PURE__ */ React.createElement(RocketIcon, { style: { width: iconSizes[props.size], height: iconSizes[props.size] } }))); } const codeTemplate = (props) => `<ThemeIcon${props}> <RocketIcon /> </ThemeIcon>`; const configurator = { type: "configurator", component: Wrapper, codeTemplate, configurator: [ { name: "variant", type: "select", initialValue: "filled", defaultValue: "filled", data: [ { label: "light", value: "light" }, { label: "filled", value: "filled" } ] }, { name: "radius", type: "size", initialValue: "sm", defaultValue: "sm" }, { name: "size", type: "size", initialValue: "md", defaultValue: "md" }, { name: "color", type: "color", initialValue: "blue", defaultValue: "blue" } ] }; export { configurator }; //# sourceMappingURL=configurator.js.map
Wrapper
vm.go
// Copyright (C) 2019-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm import ( "errors" "fmt" "time" "github.com/gorilla/rpc/v2" "github.com/prometheus/client_golang/prometheus" "github.com/Toinounet21/avalanchego-mod/cache" "github.com/Toinounet21/avalanchego-mod/chains" "github.com/Toinounet21/avalanchego-mod/codec" "github.com/Toinounet21/avalanchego-mod/codec/linearcodec" "github.com/Toinounet21/avalanchego-mod/database" "github.com/Toinounet21/avalanchego-mod/database/manager" "github.com/Toinounet21/avalanchego-mod/ids" "github.com/Toinounet21/avalanchego-mod/snow" "github.com/Toinounet21/avalanchego-mod/snow/choices" "github.com/Toinounet21/avalanchego-mod/snow/consensus/snowman" "github.com/Toinounet21/avalanchego-mod/snow/engine/common" "github.com/Toinounet21/avalanchego-mod/snow/engine/snowman/block" "github.com/Toinounet21/avalanchego-mod/snow/uptime" "github.com/Toinounet21/avalanchego-mod/snow/validators" "github.com/Toinounet21/avalanchego-mod/utils" "github.com/Toinounet21/avalanchego-mod/utils/constants" "github.com/Toinounet21/avalanchego-mod/utils/crypto" "github.com/Toinounet21/avalanchego-mod/utils/json" "github.com/Toinounet21/avalanchego-mod/utils/logging" "github.com/Toinounet21/avalanchego-mod/utils/timer/mockable" "github.com/Toinounet21/avalanchego-mod/utils/wrappers" "github.com/Toinounet21/avalanchego-mod/version" "github.com/Toinounet21/avalanchego-mod/vms/components/avax" "github.com/Toinounet21/avalanchego-mod/vms/platformvm/reward" "github.com/Toinounet21/avalanchego-mod/vms/secp256k1fx" safemath "github.com/Toinounet21/avalanchego-mod/utils/math" ) const ( droppedTxCacheSize = 64 validatorSetsCacheSize = 64 // MaxValidatorWeightFactor is the maximum factor of the validator stake // that is allowed to be placed on a validator. MaxValidatorWeightFactor uint64 = 5 // Maximum future start time for staking/delegating maxFutureStartTime = 24 * 7 * 2 * time.Hour ) var ( errInvalidID = errors.New("invalid ID") errDSCantValidate = errors.New("new blockchain can't be validated by primary network") errStartTimeTooEarly = errors.New("start time is before the current chain time") errStartAfterEndTime = errors.New("start time is after the end time") errWrongCacheType = errors.New("unexpectedly cached type") _ block.ChainVM = &VM{} _ validators.Connector = &VM{} _ secp256k1fx.VM = &VM{} _ Fx = &secp256k1fx.Fx{} ) // VM implements the snowman.ChainVM interface type VM struct { Factory metrics avax.AddressManager avax.AtomicUTXOManager *network // Used to get time. Useful for faking time during tests. clock mockable.Clock // Used to create and use keys. factory crypto.FactorySECP256K1R blockBuilder blockBuilder uptimeManager uptime.Manager rewards reward.Calculator // The context of this vm ctx *snow.Context dbManager manager.Manager // channel to send messages to the consensus engine toEngine chan<- common.Message internalState InternalState // ID of the preferred block preferred ids.ID // ID of the last accepted block lastAcceptedID ids.ID fx Fx codecRegistry codec.Registry // Bootstrapped remembers if this chain has finished bootstrapping or not bootstrapped utils.AtomicBool // Contains the IDs of transactions recently dropped because they failed // verification. These txs may be re-issued and put into accepted blocks, so // check the database to see if it was later committed/aborted before // reporting that it's dropped. // Key: Tx ID // Value: String repr. 
of the verification error droppedTxCache cache.LRU // Maps caches for each subnet that is currently whitelisted. // Key: Subnet ID // Value: cache mapping height -> validator set map validatorSetCaches map[ids.ID]cache.Cacher // Key: block ID // Value: the block currentBlocks map[ids.ID]Block } // Initialize this blockchain. // [vm.ChainManager] and [vm.vdrMgr] must be set before this function is called. func (vm *VM) Initialize( ctx *snow.Context, dbManager manager.Manager, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, msgs chan<- common.Message, _ []*common.Fx, appSender common.AppSender, ) error { ctx.Log.Verbo("initializing platform chain") registerer := prometheus.NewRegistry() if err := ctx.Metrics.Register(registerer); err != nil { return err } // Initialize metrics as soon as possible if err := vm.metrics.Initialize("", registerer); err != nil { return err } // Initialize the utility to parse addresses vm.AddressManager = avax.NewAddressManager(ctx) // Initialize the utility to fetch atomic UTXOs vm.AtomicUTXOManager = avax.NewAtomicUTXOManager(ctx.SharedMemory, Codec) vm.fx = &secp256k1fx.Fx{} vm.ctx = ctx vm.dbManager = dbManager vm.toEngine = msgs vm.codecRegistry = linearcodec.NewDefault() if err := vm.fx.Initialize(vm); err != nil { return err } vm.droppedTxCache = cache.LRU{Size: droppedTxCacheSize} vm.validatorSetCaches = make(map[ids.ID]cache.Cacher) vm.currentBlocks = make(map[ids.ID]Block) if err := vm.blockBuilder.Initialize(vm, registerer); err != nil { return fmt.Errorf( "failed to initialize the block builder: %w", err, ) } vm.network = newNetwork(vm.ApricotPhase4Time, appSender, vm) vm.rewards = reward.NewCalculator(vm.RewardConfig) is, err := NewMeteredInternalState(vm, vm.dbManager.Current().Database, genesisBytes, registerer) if err != nil { return err } vm.internalState = is // Initialize the utility to track validator uptimes vm.uptimeManager = uptime.NewManager(is) vm.UptimeLockedCalculator.SetCalculator(&vm.bootstrapped, &ctx.Lock, vm.uptimeManager) if err := vm.updateValidators(); err != nil { return fmt.Errorf( "failed to initialize validator sets: %w", err, ) } // Create all of the chains that the database says exist if err := vm.initBlockchains(); err != nil { return fmt.Errorf( "failed to initialize blockchains: %w", err, ) } vm.lastAcceptedID = is.GetLastAccepted() ctx.Log.Info("initializing last accepted block as %s", vm.lastAcceptedID) // Build off the most recently accepted block return vm.SetPreference(vm.lastAcceptedID) } // Create all chains that exist that this node validates. 
func (vm *VM) initBlockchains() error { if err := vm.createSubnet(constants.PrimaryNetworkID); err != nil { return err } if vm.StakingEnabled { for subnetID := range vm.WhitelistedSubnets { if err := vm.createSubnet(subnetID); err != nil { return err } } } else { subnets, err := vm.internalState.GetSubnets() if err != nil { return err } for _, subnet := range subnets { if err := vm.createSubnet(subnet.ID()); err != nil { return err } } } return nil } // Create the subnet with ID [subnetID] func (vm *VM) createSubnet(subnetID ids.ID) error { chains, err := vm.internalState.GetChains(subnetID) if err != nil { return err } for _, chain := range chains { if err := vm.createChain(chain); err != nil { return err } } return nil } // Create the blockchain described in [tx], but only if this node is a member of // the subnet that validates the chain func (vm *VM) createChain(tx *Tx) error { unsignedTx, ok := tx.UnsignedTx.(*UnsignedCreateChainTx) if !ok { return errWrongTxType } if vm.StakingEnabled && // Staking is enabled, so nodes might not validate all chains constants.PrimaryNetworkID != unsignedTx.SubnetID && // All nodes must validate the primary network !vm.WhitelistedSubnets.Contains(unsignedTx.SubnetID) { // This node doesn't validate this blockchain return nil } chainParams := chains.ChainParameters{ ID: tx.ID(), SubnetID: unsignedTx.SubnetID, GenesisData: unsignedTx.GenesisData, VMAlias: unsignedTx.VMID.String(), } for _, fxID := range unsignedTx.FxIDs { chainParams.FxAliases = append(chainParams.FxAliases, fxID.String()) } vm.Chains.CreateChain(chainParams) return nil } // Bootstrapping marks this VM as bootstrapping func (vm *VM) Bootstrapping() error { vm.bootstrapped.SetValue(false) return vm.fx.Bootstrapping() } // Bootstrapped marks this VM as bootstrapped func (vm *VM) Bootstrapped() error { if vm.bootstrapped.GetValue() { return nil } vm.bootstrapped.SetValue(true) if err := vm.fx.Bootstrapped(); err != nil { return err } primaryValidatorSet, exist := vm.Validators.GetValidators(constants.PrimaryNetworkID) if !exist { return errNoPrimaryValidators } primaryValidators := primaryValidatorSet.List() validatorIDs := make([]ids.ShortID, len(primaryValidators)) for i, vdr := range primaryValidators { validatorIDs[i] = vdr.ID() } if err := vm.uptimeManager.StartTracking(validatorIDs); err != nil { return err } return vm.internalState.Commit() } // Shutdown this blockchain func (vm *VM) Shutdown() error { if vm.dbManager == nil { return nil } vm.blockBuilder.Shutdown() if vm.bootstrapped.GetValue() { primaryValidatorSet, exist := vm.Validators.GetValidators(constants.PrimaryNetworkID) if !exist { return errNoPrimaryValidators } primaryValidators := primaryValidatorSet.List() validatorIDs := make([]ids.ShortID, len(primaryValidators)) for i, vdr := range primaryValidators { validatorIDs[i] = vdr.ID() } if err := vm.uptimeManager.Shutdown(validatorIDs); err != nil { return err } if err := vm.internalState.Commit(); err != nil { return err } } errs := wrappers.Errs{} errs.Add( vm.internalState.Close(), vm.dbManager.Close(), ) return errs.Err } // BuildBlock builds a block to be added to consensus func (vm *VM) BuildBlock() (snowman.Block, error) { return vm.blockBuilder.BuildBlock() } // ParseBlock implements the snowman.ChainVM interface func (vm *VM) ParseBlock(b []byte) (snowman.Block, error) { var blk Block if _, err := Codec.Unmarshal(b, &blk); err != nil { return nil, err } if err := blk.initialize(vm, b, choices.Processing, blk); err != nil { return nil, err } // TODO: remove this 
to make ParseBlock stateless if block, err := vm.GetBlock(blk.ID()); err == nil { // If we have seen this block before, return it with the most up-to-date // info return block, nil } return blk, nil } // GetBlock implements the snowman.ChainVM interface func (vm *VM) GetBlock(blkID ids.ID) (snowman.Block, error) { return vm.getBlock(blkID) } func (vm *VM) getBlock(blkID ids.ID) (Block, error) { // If block is in memory, return it. if blk, exists := vm.currentBlocks[blkID]; exists { return blk, nil } return vm.internalState.GetBlock(blkID) } // LastAccepted returns the block most recently accepted func (vm *VM) LastAccepted() (ids.ID, error) { return vm.lastAcceptedID, nil } // SetPreference sets the preferred block to be the one with ID [blkID] func (vm *VM) SetPreference(blkID ids.ID) error { if blkID == vm.preferred { // If the preference didn't change, then this is a noop return nil } vm.preferred = blkID vm.blockBuilder.ResetTimer() return nil } func (vm *VM) Preferred() (Block, error) { return vm.getBlock(vm.preferred) } // NotifyBlockReady tells the consensus engine that a new block is ready to be // created func (vm *VM) NotifyBlockReady() { select { case vm.toEngine <- common.PendingTxs: default: vm.ctx.Log.Debug("dropping message to consensus engine") } } func (vm *VM) Version() (string, error) { return version.Current.String(), nil } // CreateHandlers returns a map where: // * keys are API endpoint extensions // * values are API handlers func (vm *VM) CreateHandlers() (map[string]*common.HTTPHandler, error) { server := rpc.NewServer() server.RegisterCodec(json.NewCodec(), "application/json") server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") server.RegisterInterceptFunc(vm.metrics.apiRequestMetrics.InterceptRequest) server.RegisterAfterFunc(vm.metrics.apiRequestMetrics.AfterRequest) if err := server.RegisterService(&Service{vm: vm}, "platform"); err != nil { return nil, err } return map[string]*common.HTTPHandler{ "": { Handler: server, }, }, nil } // CreateStaticHandlers returns a map where: // * keys are API endpoint extensions // * values are API handlers func (vm *VM) CreateStaticHandlers() (map[string]*common.HTTPHandler, error) { server := rpc.NewServer() server.RegisterCodec(json.NewCodec(), "application/json") server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") if err := server.RegisterService(&StaticService{}, "platform"); err != nil { return nil, err }
"": { LockOptions: common.NoLock, Handler: server, }, }, nil } // Connected implements validators.Connector func (vm *VM) Connected(vdrID ids.ShortID, nodeVersion version.Application) error { return vm.uptimeManager.Connect(vdrID) } // Disconnected implements validators.Connector func (vm *VM) Disconnected(vdrID ids.ShortID) error { if err := vm.uptimeManager.Disconnect(vdrID); err != nil { return err } return vm.internalState.Commit() } // GetValidatorSet returns the validator set at the specified height for the // provided subnetID. func (vm *VM) GetValidatorSet(height uint64, subnetID ids.ID) (map[ids.ShortID]uint64, error) { validatorSetsCache, exists := vm.validatorSetCaches[subnetID] if !exists { validatorSetsCache = &cache.LRU{Size: validatorSetsCacheSize} // Only cache whitelisted subnets if vm.WhitelistedSubnets.Contains(subnetID) || subnetID == constants.PrimaryNetworkID { vm.validatorSetCaches[subnetID] = validatorSetsCache } } if validatorSetIntf, ok := validatorSetsCache.Get(height); ok { validatorSet, ok := validatorSetIntf.(map[ids.ShortID]uint64) if !ok { return nil, errWrongCacheType } vm.metrics.validatorSetsCached.Inc() return validatorSet, nil } lastAcceptedHeight, err := vm.GetCurrentHeight() if err != nil { return nil, err } if lastAcceptedHeight < height { return nil, database.ErrNotFound } // get the start time to track metrics startTime := vm.Clock().Time() currentValidators, ok := vm.Validators.GetValidators(subnetID) if !ok { return nil, errNotEnoughValidators } currentValidatorList := currentValidators.List() vdrSet := make(map[ids.ShortID]uint64, len(currentValidatorList)) for _, vdr := range currentValidatorList { vdrSet[vdr.ID()] = vdr.Weight() } for i := lastAcceptedHeight; i > height; i-- { diffs, err := vm.internalState.GetValidatorWeightDiffs(i, subnetID) if err != nil { return nil, err } for nodeID, diff := range diffs { var op func(uint64, uint64) (uint64, error) if diff.Decrease { // The validator's weight was decreased at this block, so in the // prior block it was higher. op = safemath.Add64 } else { // The validator's weight was increased at this block, so in the // prior block it was lower. 
op = safemath.Sub64 } newWeight, err := op(vdrSet[nodeID], diff.Amount) if err != nil { return nil, err } if newWeight == 0 { delete(vdrSet, nodeID) } else { vdrSet[nodeID] = newWeight } } } // cache the validator set validatorSetsCache.Put(height, vdrSet) endTime := vm.Clock().Time() vm.metrics.validatorSetsCreated.Inc() vm.metrics.validatorSetsDuration.Add(float64(endTime.Sub(startTime))) vm.metrics.validatorSetsHeightDiff.Add(float64(lastAcceptedHeight - height)) return vdrSet, nil } // GetCurrentHeight returns the height of the last accepted block func (vm *VM) GetCurrentHeight() (uint64, error) { lastAccepted, err := vm.getBlock(vm.lastAcceptedID) if err != nil { return 0, err } return lastAccepted.Height(), nil } func (vm *VM) updateValidators() error { currentValidators := vm.internalState.CurrentStakerChainState() primaryValidators, err := currentValidators.ValidatorSet(constants.PrimaryNetworkID) if err != nil { return err } if err := vm.Validators.Set(constants.PrimaryNetworkID, primaryValidators); err != nil { return err } weight, _ := primaryValidators.GetWeight(vm.ctx.NodeID) vm.localStake.Set(float64(weight)) vm.totalStake.Set(float64(primaryValidators.Weight())) for subnetID := range vm.WhitelistedSubnets { subnetValidators, err := currentValidators.ValidatorSet(subnetID) if err != nil { return err } if err := vm.Validators.Set(subnetID, subnetValidators); err != nil { return err } } return nil } // Returns the time when the next staker of any subnet starts/stops staking // after the current timestamp func (vm *VM) nextStakerChangeTime(vs ValidatorState) (time.Time, error) { currentStakers := vs.CurrentStakerChainState() pendingStakers := vs.PendingStakerChainState() earliest := mockable.MaxTime if currentStakers := currentStakers.Stakers(); len(currentStakers) > 0 { nextStakerToRemove := currentStakers[0] staker, ok := nextStakerToRemove.UnsignedTx.(TimedTx) if !ok { return time.Time{}, errWrongTxType } endTime := staker.EndTime() if endTime.Before(earliest) { earliest = endTime } } if pendingStakers := pendingStakers.Stakers(); len(pendingStakers) > 0 { nextStakerToAdd := pendingStakers[0] staker, ok := nextStakerToAdd.UnsignedTx.(TimedTx) if !ok { return time.Time{}, errWrongTxType } startTime := staker.StartTime() if startTime.Before(earliest) { earliest = startTime } } return earliest, nil } func (vm *VM) CodecRegistry() codec.Registry { return vm.codecRegistry } func (vm *VM) Clock() *mockable.Clock { return &vm.clock } func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } // Returns the percentage of the total stake on the Primary Network of nodes // connected to this node. func (vm *VM) getPercentConnected() (float64, error) { vdrSet, exists := vm.Validators.GetValidators(constants.PrimaryNetworkID) if !exists { return 0, errNoPrimaryValidators } vdrs := vdrSet.List() var ( connectedStake uint64 err error ) for _, vdr := range vdrs { if !vm.uptimeManager.IsConnected(vdr.ID()) { continue // not connected to us --> don't include } connectedStake, err = safemath.Add64(connectedStake, vdr.Weight()) if err != nil { return 0, err } } return float64(connectedStake) / float64(vdrSet.Weight()), nil }
return map[string]*common.HTTPHandler{
file.go
package handler import ( "Stowaway/agent/manager" "Stowaway/protocol" "Stowaway/share" ) func DispatchFileMess(mgr *manager.Manager) { for { message := <-mgr.FileManager.FileMessChan switch message.(type) { case *protocol.FileStatReq: mess := message.(*protocol.FileStatReq) mgr.FileManager.File.FileName = mess.Filename mgr.FileManager.File.SliceNum = mess.SliceNum err := mgr.FileManager.File.CheckFileStat(protocol.TEMP_ROUTE, protocol.ADMIN_UUID, share.AGENT) if err == nil { go mgr.FileManager.File.Receive(protocol.TEMP_ROUTE, protocol.ADMIN_UUID, share.AGENT) } case *protocol.FileStatRes: mess := message.(*protocol.FileStatRes) if mess.OK == 1 { go mgr.FileManager.File.Upload(protocol.TEMP_ROUTE, protocol.ADMIN_UUID, share.AGENT) } else { mgr.FileManager.File.Handler.Close() } case *protocol.FileDownReq: mess := message.(*protocol.FileDownReq) mgr.FileManager.File.FilePath = mess.FilePath mgr.FileManager.File.FileName = mess.Filename go mgr.FileManager.File.SendFileStat(protocol.TEMP_ROUTE, protocol.ADMIN_UUID, share.AGENT) case *protocol.FileData:
case *protocol.FileErr: mgr.FileManager.File.ErrChan <- true } } }
mess := message.(*protocol.FileData) mgr.FileManager.File.DataChan <- mess.Data
repo.py
from __future__ import unicode_literals import copy from funcy import merge from schema import Optional from contextlib import contextmanager from dvc.external_repo import external_repo from dvc.utils.compat import str from .local import DependencyLOCAL class DependencyREPO(DependencyLOCAL): PARAM_REPO = "repo" PARAM_URL = "url" PARAM_REV = "rev" PARAM_REV_LOCK = "rev_lock" REPO_SCHEMA = { Optional(PARAM_URL): str, Optional(PARAM_REV): str, Optional(PARAM_REV_LOCK): str, } def __init__(self, def_repo, stage, *args, **kwargs): self.def_repo = def_repo super(DependencyREPO, self).__init__(stage, *args, **kwargs) def _parse_path(self, remote, path): return None @property def is_in_repo(self): return False def __str__(self): return "{} ({})".format(self.def_path, self.def_repo[self.PARAM_URL]) @contextmanager def _make_repo(self, **overrides): with external_repo(**merge(self.def_repo, overrides)) as repo: yield repo def status(self):
def save(self): pass def dumpd(self): return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo} def download(self, to, resume=False): with self._make_repo( cache_dir=self.repo.cache.local.cache_dir ) as repo: self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev() out = repo.find_out_by_relpath(self.def_path) repo.fetch(out.stage.path) to.info = copy.copy(out.info) to.checkout() def update(self): with self._make_repo(rev_lock=None) as repo: self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev()
with self._make_repo() as repo: current = repo.find_out_by_relpath(self.def_path).info with self._make_repo(rev_lock=None) as repo: updated = repo.find_out_by_relpath(self.def_path).info if current != updated: return {str(self): "update available"} return {}
rocksdb_kv_test.go
// Copyright (C) 2019-2020 Zilliz. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the License // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing permissions and limitations under the License. package rocksdbkv_test import ( "testing" rocksdbkv "github.com/milvus-io/milvus/internal/kv/rocksdb" "github.com/stretchr/testify/assert" ) func TestRocksdbKV(t *testing.T) { name := "/tmp/rocksdb" rocksdbKV, err := rocksdbkv.NewRocksdbKV(name) if err != nil { panic(err) } defer rocksdbKV.Close() // Need to call RemoveWithPrefix defer rocksdbKV.RemoveWithPrefix("") err = rocksdbKV.Save("abc", "123") assert.Nil(t, err) err = rocksdbKV.Save("abcd", "1234") assert.Nil(t, err) val, err := rocksdbKV.Load("abc") assert.Nil(t, err) assert.Equal(t, val, "123") keys, vals, err := rocksdbKV.LoadWithPrefix("abc") assert.Nil(t, err) assert.Equal(t, len(keys), len(vals)) assert.Equal(t, len(keys), 2) assert.Equal(t, keys[0], "abc") assert.Equal(t, keys[1], "abcd") assert.Equal(t, vals[0], "123") assert.Equal(t, vals[1], "1234") err = rocksdbKV.Save("key_1", "123") assert.Nil(t, err) err = rocksdbKV.Save("key_2", "456") assert.Nil(t, err) err = rocksdbKV.Save("key_3", "789") assert.Nil(t, err) keys = []string{"key_1", "key_2"} vals, err = rocksdbKV.MultiLoad(keys) assert.Nil(t, err) assert.Equal(t, len(vals), len(keys)) assert.Equal(t, vals[0], "123") assert.Equal(t, vals[1], "456") } func TestRocksdbKV_Prefix(t *testing.T)
{ name := "/tmp/rocksdb" rocksdbKV, err := rocksdbkv.NewRocksdbKV(name) if err != nil { panic(err) } defer rocksdbKV.Close() // Need to call RemoveWithPrefix defer rocksdbKV.RemoveWithPrefix("") err = rocksdbKV.Save("abcd", "123") assert.Nil(t, err) err = rocksdbKV.Save("abdd", "1234") assert.Nil(t, err) err = rocksdbKV.Save("abddqqq", "1234555") assert.Nil(t, err) keys, vals, err := rocksdbKV.LoadWithPrefix("abc") assert.Nil(t, err) assert.Equal(t, len(keys), 1) assert.Equal(t, len(vals), 1) //fmt.Println(keys) //fmt.Println(vals) err = rocksdbKV.RemoveWithPrefix("abc") assert.Nil(t, err) val, err := rocksdbKV.Load("abc") assert.Nil(t, err) assert.Equal(t, len(val), 0) val, err = rocksdbKV.Load("abdd") assert.Nil(t, err) assert.Equal(t, val, "1234") val, err = rocksdbKV.Load("abddqqq") assert.Nil(t, err) assert.Equal(t, val, "1234555") }
run_all.py
#!/usr/bin/env python # Copyright 2021, Kay Hayen, mailto:[email protected] # # Python test originally created or extracted from other peoples work. The # parts from me are licensed as below. It is at least Free Software where # it's copied from other people. In these cases, that will normally be # indicated. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Runner for package tests of Nuitka. Package tests are typically aiming at checking specific module constellations in module mode and making sure the details are being right there. These are synthetic small packages, each of which try to demonstrate one or more points or special behavior. """ import os import sys # Find nuitka package relative to us. sys.path.insert( 0, os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..") ), ) # isort:start from nuitka.tools.testing.Common import ( compareWithCPython, createSearchMode, getTempDir, my_print, setup, ) def
(): setup(suite="packages") search_mode = createSearchMode() for filename in sorted(os.listdir(".")): if not os.path.isdir(filename) or filename.endswith(".build"): continue extra_flags = [ "--module", "expect_success", "remove_output", "two_step_execution", ] active = search_mode.consider(dirname=None, filename=filename) if active: my_print("Consider output of compiled package:", filename) filename_main = None filename_main = os.path.join( filename, "".join(part.title() for part in filename.split("_")) + ".py" ) if os.path.exists(filename_main): filename_main = os.path.basename(filename_main) else: filename_main = None if filename_main is None: for filename_main in os.listdir(filename): if filename_main == "__pycache__": continue if not os.path.isdir(os.path.join(filename, filename_main)): continue if filename_main not in ("..", "."): break else: search_mode.onErrorDetected( """\ Error, no package in test directory '%s' found, incomplete test case.""" % filename ) extra_flags.append( "--include-package=%s" % os.path.basename(filename_main) ) extra_flags.append("--output-dir=%s" % getTempDir()) if filename == "top_level_attributes": extra_flags.append("--module-entry-point=main") compareWithCPython( dirname=filename, filename=filename_main, extra_flags=extra_flags, search_mode=search_mode, needs_2to3=False, ) search_mode.finish() if __name__ == "__main__": main()
main
logger.go
package logger import ( "errors" "fmt" "io" "sync" ) type Logger interface { Info(msg string, args ...interface{}) Debug(msg string, args ...interface{}) } type DefaultLogger struct { wr io.Writer mu sync.Mutex } func NewDefaultLogger(wr io.Writer) Logger { s := &DefaultLogger{} s.wr = wr return s } func (s *DefaultLogger) Info(msg string, args ...interface{}) { s.log("INFO", msg, args...) } func (s *DefaultLogger) Debug(msg string, args ...interface{}) { s.log("DEBUG", msg, args...) } func (s *DefaultLogger) log(kind string, msg string, args ...interface{}) { write := func(format string, args ...interface{}) { s.mu.Lock() defer s.mu.Unlock() p := fmt.Sprintf(format, args...) _, err := s.wr.Write([]byte(p))
if err != nil { panic(err) } _, err = s.wr.Write([]byte("\n")) if err != nil { panic(err) } } kvs, err := formatArgs(args) if err != nil { write("ERROR Logger invalid args passed. Msg: %v Args: %v Err: %v", msg, args, err) } write("%v %v %v", kind, msg, kvs) } type kv struct { K string V string } func formatArgs(args []interface{}) (res []kv, _ error) { if len(args)%2 != 0 { return nil, errors.New("len of args not even") } for i := 0; i < len(args); i += 2 { k, ok := args[i].(string) if !ok { return nil, errors.New("key arg passes in not a string") } v := fmt.Sprintf("%v", args[i+1]) res = append(res, kv{k, v}) } return }
encoder_tpl.go
/** * Copyright 2014 Paul Querna * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package ffjsoninception import ( "text/template" ) var encodeTpl map[string]*template.Template func
() { encodeTpl = make(map[string]*template.Template) funcs := map[string]string{ "handleMarshaler": handleMarshalerTxt, } tplFuncs := template.FuncMap{} for k, v := range funcs { encodeTpl[k] = template.Must(template.New(k).Funcs(tplFuncs).Parse(v)) } } type handleMarshaler struct { IC *Inception Name string MarshalJSONBuf bool Marshaler bool } var handleMarshalerTxt = ` {{if eq .MarshalJSONBuf true}} { err = {{.Name}}.MarshalJSONBuf(buf) if err != nil { return err } } {{else if eq .Marshaler true}} { obj, err = {{.Name}}.MarshalJSON() if err != nil { return err } buf.Write(obj) } {{end}} `
init
Link.tsx
import Box, { Attributes, BoxProps } from '../Box/Box'; const Icon: RefForwardingComponent<SVGSVGElement, Attributes<Omit<SVGSVGElement, 'display'>, BoxProps & { size?: string; }>> = (props, ref) => { const { size = '1rem', ...rest } = props; return <Box {...rest} className="Icon" height={size} ref={ref} tag="svg" viewBox="0 0 24 24" width={size}> {<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth={2} strokeLinecap="round" strokeLinejoin="round" className="link_svg__feather link_svg__feather-link"><path d="M10 13a5 5 0 007.54.54l3-3a5 5 0 00-7.07-7.07l-1.72 1.71" /><path d="M14 11a5 5 0 00-7.54-.54l-3 3a5 5 0 007.07 7.07l1.71-1.71" /></svg>} </Box>; }; export default forwardRef(Icon);
import React, { forwardRef, RefForwardingComponent } from 'react';
mbView.js
/*
 * Copyright (c) 2015-2025 Phoinex Scholars Co. http://dpq.co.ir
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**
@ngdoc Services
@name $mbView
@description Manages the list of views

A view is a singleton page with a unique path in the system. It is not possible
to create (or register) a view with a parameterized path. There may be additional
parameters for a view, which are sent to the view via query parameters.

Here is the list of services related to a specific view:

- $params: list of params from the query
- $view: related view controller (controls the view in the layout system)
- $element: HTML view
- $scope: data scope of the view

These are injectable into the view controller.

Note: A Toolbar with the same path will be registered for a view.

Note: A Menu with the same path will be registered for a view.
 */
function mbView() {

	//-----------------------------------------------------------------------------------
	// Service and Factory
	//-----------------------------------------------------------------------------------
	var provider;
	var service;

	var mbRoute;
	var rootScope;
	var View;

	//-----------------------------------------------------------------------------------
	// Variables
	//-----------------------------------------------------------------------------------
	var views = {};
	var viewsConfig = {};
	var viewsRootScope;

	//-----------------------------------------------------------------------------------
	// functions
	//-----------------------------------------------------------------------------------
	function addView(name, viewConfig) {
		var config = _.assign({
			id: name,
			url: name,
			rootScope: viewsRootScope,
			isView: true,
		}, viewConfig);

		// create view
		var view = new View(config);
		views[name] = view;

		// Add to routes
		mbRoute.when(name, config);

		// TODO: Add toolbar
		// TODO: Add menu
		return service;
	};

	function getView(name) {
		return views[name];
	};

	function hasView(name) {
		var view = getView(name);
		return !_.isUndefined(view);
	};

	/**
	 Fetch a View

	 If the view is open, then we send focus to it and the other parameters
	 will be ignored.

	 State will be saved by the layout system and used at launch time. It is
	 accessible with $state from the view controller. If the controller changes
	 the state, then it will be stored and used the next time. The param is
	 reserved in the state and it is forbidden to be changed by the controllers.

	 The anchor is used only the first time. It may be changed by the user.
	 The layout system is responsible for tracking the location of the view.

	 @name fetch
	 @memberof $mbView
	 @param {string} url The name/URL of the view
	 @param {Object} state List of key-values to use in the view (accessible with $state from the view controller)
	 @param {string} anchor Where the view is placed.
	 */
	function fetchView(name, state, anchor) {
		var view = getView(name);
		if (_.isUndefined(view)) {
			// TODO: maso, 2020: View not found, throw an error
			return;
		}
		anchor = anchor || view.anchor;
		if (view.isVisible()) {
			return view.setFocus();
		}
		return view
			.setAnchor(anchor)
			.setState(state);
	};

	function open(name, state, anchor) {
		return fetchView(name, state, anchor)
			.setVisible(true);
	}

	function getViews() {
		return views;
	};

	function getScope() {
		return viewsRootScope;
	}

	function init() {
		// Load all views
		_.forEach(viewsConfig, function(viewConfig, viewId) {
			addView(viewId, viewConfig);
		});
	}

	//-----------------------------------------------------------------------------------
	// End
	//-----------------------------------------------------------------------------------
	service = {
		add: addView,
		get: getView,
		has: hasView,
		fetch: fetchView,
		open: open,
		getViews: getViews
	};

	provider = {
		/* @ngInject */
		$get: function(
			/* AngularJS */ $rootScope,
			/* Mblowfish */ $mbRoute, MbView) {
			// Service
			rootScope = $rootScope;
			mbRoute = $mbRoute;
			View = MbView;
			init();
			return service;
		},
		addView: function(viewId, viewConfig) {
			viewsConfig[viewId] = viewConfig;
			return provider;
		}
	};
	return provider;
}

export default mbView;
reloadOnSearch: false, reloadOnUrl: true,
index.js
'use strict';

var is = require('../is-observable-value');

module.exports = function (T, a) {
	var value = new T(), invoked = false, x = {}, o1, o2, o3;

	a(is(value), true, "Create");
	value.on('change', function (event) { invoked = event.newValue; });
	a(invoked, false, "Pre emit");
	value.value = 'raz';
	a(invoked, 'raz', "After emit");

	invoked = false;
	value = new T(x);
a(value.value, x, "Value"); value.value = new T('raz'); a(value.value, 'raz', "Inner mutable"); o2 = new T(2); o3 = new T(3); o1 = o2.add(o3); a(o1.value, 5); o1 = new T(0); a(o1.subtract(0).value, 0); a(o1.subtract(0, 0).value, 0); };
test_map.py
import pytest from data.map import Map from data import constants def test_set_get_map(): map = Map() map.set_map( [ [(0, 0), constants.DEFAULT_WALL, 0], [(0, 1), constants.DEFAULT_WALL, 90], [(0, 2), constants.DEFAULT_WALL, 180] ] ) assert map.get_map() == [ [(0, 0), constants.DEFAULT_WALL, 0], [(0, 1), constants.DEFAULT_WALL, 90], [(0, 2), constants.DEFAULT_WALL, 180] ] def test_set_get_mapxy(): map = Map() map.set_mapx(20) map.set_mapy(15) assert map.get_mapx() == 20
assert map.get_mapy() == 15 # pytest.main(["-v", "--tb=no", "test_map.py"])
api.indices.get_field_mapping.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // Code generated from specification version 8.2.0: DO NOT EDIT package esapi import ( "context" "errors" "net/http" "strconv" "strings" ) func
(t Transport) IndicesGetFieldMapping { return func(fields []string, o ...func(*IndicesGetFieldMappingRequest)) (*Response, error) { var r = IndicesGetFieldMappingRequest{Fields: fields} for _, f := range o { f(&r) } return r.Do(r.ctx, t) } } // ----- API Definition ------------------------------------------------------- // IndicesGetFieldMapping returns mapping for one or more fields. // // See full documentation at https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html. // type IndicesGetFieldMapping func(fields []string, o ...func(*IndicesGetFieldMappingRequest)) (*Response, error) // IndicesGetFieldMappingRequest configures the Indices Get Field Mapping API request. // type IndicesGetFieldMappingRequest struct { Index []string Fields []string AllowNoIndices *bool ExpandWildcards string IgnoreUnavailable *bool IncludeDefaults *bool Local *bool Pretty bool Human bool ErrorTrace bool FilterPath []string Header http.Header ctx context.Context } // Do executes the request and returns response or error. // func (r IndicesGetFieldMappingRequest) Do(ctx context.Context, transport Transport) (*Response, error) { var ( method string path strings.Builder params map[string]string ) method = "GET" if len(r.Fields) == 0 { return nil, errors.New("fields is required and cannot be nil or empty") } path.Grow(7 + 1 + len(strings.Join(r.Index, ",")) + 1 + len("_mapping") + 1 + len("field") + 1 + len(strings.Join(r.Fields, ","))) path.WriteString("http://") if len(r.Index) > 0 { path.WriteString("/") path.WriteString(strings.Join(r.Index, ",")) } path.WriteString("/") path.WriteString("_mapping") path.WriteString("/") path.WriteString("field") path.WriteString("/") path.WriteString(strings.Join(r.Fields, ",")) params = make(map[string]string) if r.AllowNoIndices != nil { params["allow_no_indices"] = strconv.FormatBool(*r.AllowNoIndices) } if r.ExpandWildcards != "" { params["expand_wildcards"] = r.ExpandWildcards } if r.IgnoreUnavailable != nil { params["ignore_unavailable"] = strconv.FormatBool(*r.IgnoreUnavailable) } if r.IncludeDefaults != nil { params["include_defaults"] = strconv.FormatBool(*r.IncludeDefaults) } if r.Local != nil { params["local"] = strconv.FormatBool(*r.Local) } if r.Pretty { params["pretty"] = "true" } if r.Human { params["human"] = "true" } if r.ErrorTrace { params["error_trace"] = "true" } if len(r.FilterPath) > 0 { params["filter_path"] = strings.Join(r.FilterPath, ",") } req, err := newRequest(method, path.String(), nil) if err != nil { return nil, err } if len(params) > 0 { q := req.URL.Query() for k, v := range params { q.Set(k, v) } req.URL.RawQuery = q.Encode() } if len(r.Header) > 0 { if len(req.Header) == 0 { req.Header = r.Header } else { for k, vv := range r.Header { for _, v := range vv { req.Header.Add(k, v) } } } } if ctx != nil { req = req.WithContext(ctx) } res, err := transport.Perform(req) if err != nil { return nil, err } response := Response{ StatusCode: res.StatusCode, Body: res.Body, Header: res.Header, } return &response, nil } // WithContext sets the request context. // func (f IndicesGetFieldMapping) WithContext(v context.Context) func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.ctx = v } } // WithIndex - a list of index names. 
// func (f IndicesGetFieldMapping) WithIndex(v ...string) func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.Index = v } } // WithAllowNoIndices - whether to ignore if a wildcard indices expression resolves into no concrete indices. (this includes `_all` string or when no indices have been specified). // func (f IndicesGetFieldMapping) WithAllowNoIndices(v bool) func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.AllowNoIndices = &v } } // WithExpandWildcards - whether to expand wildcard expression to concrete indices that are open, closed or both.. // func (f IndicesGetFieldMapping) WithExpandWildcards(v string) func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.ExpandWildcards = v } } // WithIgnoreUnavailable - whether specified concrete indices should be ignored when unavailable (missing or closed). // func (f IndicesGetFieldMapping) WithIgnoreUnavailable(v bool) func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.IgnoreUnavailable = &v } } // WithIncludeDefaults - whether the default mapping values should be returned as well. // func (f IndicesGetFieldMapping) WithIncludeDefaults(v bool) func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.IncludeDefaults = &v } } // WithLocal - return local information, do not retrieve the state from master node (default: false). // func (f IndicesGetFieldMapping) WithLocal(v bool) func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.Local = &v } } // WithPretty makes the response body pretty-printed. // func (f IndicesGetFieldMapping) WithPretty() func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.Pretty = true } } // WithHuman makes statistical values human-readable. // func (f IndicesGetFieldMapping) WithHuman() func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.Human = true } } // WithErrorTrace includes the stack trace for errors in the response body. // func (f IndicesGetFieldMapping) WithErrorTrace() func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.ErrorTrace = true } } // WithFilterPath filters the properties of the response body. // func (f IndicesGetFieldMapping) WithFilterPath(v ...string) func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { r.FilterPath = v } } // WithHeader adds the headers to the HTTP request. // func (f IndicesGetFieldMapping) WithHeader(h map[string]string) func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { if r.Header == nil { r.Header = make(http.Header) } for k, v := range h { r.Header.Add(k, v) } } } // WithOpaqueID adds the X-Opaque-Id header to the HTTP request. // func (f IndicesGetFieldMapping) WithOpaqueID(s string) func(*IndicesGetFieldMappingRequest) { return func(r *IndicesGetFieldMappingRequest) { if r.Header == nil { r.Header = make(http.Header) } r.Header.Set("X-Opaque-Id", s) } }
newIndicesGetFieldMappingFunc
custom_transforms.py
import math import torch import random import numpy as np import torch.nn as nn from numpy import int64 as int64 import torchvision.transforms as transforms from PIL import Image, ImageOps, ImageFilter class Normalize(object): """Normalize a tensor image with mean and standard deviation. Args: mean (tuple): means for each channel. std (tuple): standard deviations for each channel. """ def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)): self.mean = mean self.std = std def __call__(self, sample): img = sample['image'] mask = sample['label'] img = np.array(img).astype(np.float32) mask = np.array(mask).astype(np.float32) img /= 255.0 img -= self.mean img /= self.std return {'image': img, 'label': mask} class ToTensor(object): """Convert ndarrays in sample to Tensors.""" def __call__(self, sample): # swap color axis because # numpy image: H x W x C # torch image: C X H X W img = sample['image'] mask = sample['label'] img = np.array(img).astype(np.float32).transpose((2, 0, 1)) mask = np.array(mask).astype(np.float32) img = torch.from_numpy(img).float() mask = torch.from_numpy(mask).float() return {'image': img, 'label': mask} class RandomHorizontalFlip(object): def __call__(self, sample): img = sample['image'] mask = sample['label'] if random.random() < 0.5: img = img.transpose(Image.FLIP_LEFT_RIGHT) mask = mask.transpose(Image.FLIP_LEFT_RIGHT) return {'image': img, 'label': mask} class RandomRotate(object): def __init__(self, degree): self.degree = degree def __call__(self, sample): img = sample['image'] mask = sample['label'] rotate_degree = random.uniform(-1 * self.degree, self.degree) img = img.rotate(rotate_degree, Image.BILINEAR) mask = mask.rotate(rotate_degree, Image.NEAREST) return {'image': img, 'label': mask} class RandomGaussianBlur(object): def __call__(self, sample): img = sample['image'] mask = sample['label'] if random.random() < 0.5: img = img.filter(ImageFilter.GaussianBlur( radius=random.random())) return {'image': img, 'label': mask} class RandomScaleCrop(object): def __init__(self, base_size, crop_size, fill=0): self.base_size = base_size self.crop_size = crop_size self.fill = fill def __call__(self, sample): img = sample['image'] mask = sample['label'] # random scale (short edge) short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0)) w, h = img.size if h > w: ow = short_size oh = int(1.0 * h * ow / w) else: oh = short_size ow = int(1.0 * w * oh / h) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) # pad crop if short_size < self.crop_size: padh = self.crop_size - oh if oh < self.crop_size else 0 padw = self.crop_size - ow if ow < self.crop_size else 0 img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0) mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill) # random crop crop_size w, h = img.size x1 = random.randint(0, w - self.crop_size) y1 = random.randint(0, h - self.crop_size) img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) return {'image': img, 'label': mask} class FixScaleCrop(object): def __init__(self, crop_size): self.crop_size = crop_size def __call__(self, sample): img = sample['image'] mask = sample['label'] w, h = img.size if w > h: oh = self.crop_size ow = int(1.0 * w * oh / h) else: ow = self.crop_size oh = int(1.0 * h * ow / w) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) # center crop w, h = img.size x1 = int(round((w - self.crop_size) 
/ 2.)) y1 = int(round((h - self.crop_size) / 2.)) img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) return {'image': img, 'label': mask} # resize to 512*1024 class FixedResize(object): """change the short edge length to size""" def __init__(self, resize=512): self.size1 = resize # size= 512 def __call__(self, sample): img = sample['image'] mask = sample['label'] assert img.size == mask.size w, h = img.size if w > h: oh = self.size1 ow = int(1.0 * w * oh / h) else: ow = self.size1 oh = int(1.0 * h * ow / w) img = img.resize((ow, oh), Image.BILINEAR) mask = mask.resize((ow, oh), Image.NEAREST) return {'image': img, 'label': mask} # random crop 321*321 class
(object): def __init__(self, crop_size=320): self.crop_size = crop_size def __call__(self, sample): img = sample['image'] mask = sample['label'] w, h = img.size x1 = random.randint(0, w - self.crop_size) y1 = random.randint(0, h - self.crop_size) img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) return {'image': img, 'label': mask} class RandomScale(object): def __init__(self, scales=(1,)): self.scales = scales def __call__(self, sample): img = sample['image'] mask = sample['label'] w, h = img.size scale = random.choice(self.scales) w, h = int(w * scale), int(h * scale) return {'image': img, 'label': mask} class TransformTr(object): def __init__(self, resize, multi_scale=None): if multi_scale is None: self.composed_transforms = transforms.Compose([ FixedResize(resize=resize), # RandomCrop(crop_size=args.crop_size), # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255), # tr.RandomGaussianBlur(), # Normalize(mean, std), # ToTensor() ]) else: self.composed_transforms = transforms.Compose([ FixedResize(resize=args.resize), RandomScale(scales=args.multi_scale), RandomCrop(crop_size=args.crop_size), # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255), # tr.RandomGaussianBlur(), Normalize(mean, std), ToTensor()]) def __call__(self, sample): return self.composed_transforms(sample) class TransformVal(object): def __init__(self, args, mean, std): self.composed_transforms = transforms.Compose([ FixedResize(resize=args.resize), FixScaleCrop(crop_size=args.crop_size), # TODO:CHECK THIS Normalize(mean, std), ToTensor()]) def __call__(self, sample): return self.composed_transforms(sample)
RandomCrop
git_test.go
package main import ( "testing" "strings" ) func TestFetch(t *testing.T) { mockRunner := NewMockRunner("") git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} git.Fetch(repo) if mockRunner.folder != "/test/" { t.Errorf("Folder should be /test/") } if mockRunner.command!= "git" { t.Errorf("Command should be git") } if len(mockRunner.args) != 2 { t.Errorf("Args size should be 2") } if mockRunner.args[0] != "fetch" { t.Errorf("Args 0 should be fetch") } if mockRunner.args[1] != "--all" { t.Errorf("Args 1 should be --all") } } func TestClone(t *testing.T) { // TODO } func TestStatus(t *testing.T) { mockRunner := NewMockRunner("") git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} git.Status(repo) if mockRunner.folder != "/test/" { t.Errorf("Folder should be /test/") } if mockRunner.command!= "git" { t.Errorf("Command should be git") } if len(mockRunner.args) != 2 { t.Errorf("Args size should be 2") } if mockRunner.args[0] != "status" { t.Errorf("Args 0 should be status") } if mockRunner.args[1] != "-unormal" { t.Errorf("Args 1 should be --all") } } func TestNotGitRepoStatus(t *testing.T) { output := "fatal: Not a git repository (or any of the parent directories): .git" mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if status != "error" { t.Errorf("Should be error status") } } func TestRemoteSyncStatus(t *testing.T) { output := `On branch develop Your branch is up to date with 'origin/develop'. nothing to commit, working tree clean` mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if !strings.HasPrefix(status, SYNC) { t.Errorf("Should be sync status") } } func TestRemoteBehindStatus(t *testing.T)
func TestRemoteAheadStatus(t *testing.T) { output := `On branch develop Your branch is ahead of 'origin/develop' by 1 commit. (use "git push" to publish your local commits) nothing to commit, working tree clean` mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if !strings.HasPrefix(status, AHEAD) { t.Errorf("Should be ahead status") } } func TestRemoteDivergedStatus(t *testing.T) { output := `On branch chore/test Your branch and 'origin/chore/test' have diverged, and have 1 and 1 different commits each, respectively. (use "git pull" to merge the remote branch into yours) nothing to commit, working tree clean` mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if !strings.HasPrefix(status, DIVERGED) { t.Errorf("Should be diverged status") } } func TestRemoteNoRemoteStatus(t *testing.T) { output := `On branch test nothing to commit, working tree clean` mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if !strings.HasPrefix(status, NO_REMOTE) { t.Errorf("Should be no remote status") } } func TestLocalSyncStatus(t *testing.T) { output := `On branch develop Your branch is up-to-date with 'origin/develop'. nothing to commit, working tree clean` mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if !strings.HasSuffix(status, SYNC) { t.Errorf("Should be sync status") } } func TestLocalUntrackedStatus(t *testing.T) { output := `On branch develop Your branch is up-to-date with 'origin/develop'. Untracked files: (use "git add <file>..." to include in what will be committed) untracked-file nothing added to commit but untracked files present (use "git add" to track)` mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if !strings.HasSuffix(status, UNTRACKED) { t.Errorf("Should be untracked status") } } func TestLocalChangedStatus(t *testing.T) { output := `On branch develop Your branch is up-to-date with 'origin/develop'. Changes not staged for commit: (use "git add <file>..." to update what will be committed) (use "git checkout -- <file>..." to discard changes in working directory) modified: file.txt no changes added to commit (use "git add" and/or "git commit -a")` mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if !strings.HasSuffix(status, CHANGED) { t.Errorf("Should be changed status") } } func NewMockRunner(output string) *MockRunner { return &MockRunner{output: output} } func (runner *MockRunner) Run(folder string, command string, args []string) (string, error) { runner.folder = folder runner.command = command runner.args = args return runner.output, nil } type MockRunner struct { folder string command string args []string output string }
{ output := `On branch develop Your branch is behind 'origin/develop' by 10 commits, and can be fast-forwarded. (use "git pull" to update your local branch) nothing to commit, working tree clean` mockRunner := NewMockRunner(output) git := NewCustomGit(mockRunner) repo := &Repo{Path: "/test/"} status := git.Status(repo) if !strings.HasPrefix(status, BEHIND) { t.Errorf("Should be behind status") } }
flowinstance_test.go
package activity import ( "fmt" "testing" ) func
(t *testing.T) { json := `{ "name": "测试to flow", "start": { "params":{ "name":{"type":"string","value":"menghui"}, "age":{"type":"number","value":41} }, "variables": { "var_a": { "type": "string", "value": "test var" }, "var_b": { "type": "number", "value": 12 } }, "flow": [ { "gate": "to", "target": [{ "expressions" : ["var_b=var_b+10"], "style" : "stdout", "flow" : [ { "gate" : "to", "target": [{ "style" : "stdout", "expressions" : ["var_a='next activity'"] }] } ] }] } ]}}` fl, err := NewFlowInstanceFromJSON(json) if err != nil { fmt.Println(err) return } r := map[string]interface{}{ "name": "menghui", } err = fl.Execute(r) if err != nil { fmt.Println(err) t.Fail() } } func TestIfFlowToInstance(t *testing.T) { json := `{ "name": "测试to flow", "start": { "params":{ "name":{"type":"string","value":"menghui"}, "age":{"type":"number","value":41} }, "variables": { "var_a": { "type": "string", "value": "test var" }, "var_b": { "type": "number", "value": 12 } }, "flow": [ { "gate":"ifto", "if":"name=='menghui'", "then":[ { "style" : "stdout", "expressions" : ["var_a=\"he is tongtong's father!\""] } ], "else":[ { "style" : "stdout", "expressions" : ["var_a=\"he is not tongtong's father!\""] } ] } ]}}` fl, err := NewFlowInstanceFromJSON(json) if err != nil { fmt.Println(err) return } r := map[string]interface{}{ "name": "menghui2", } err = fl.Execute(r) if err != nil { fmt.Println(err) t.Fail() } } func TestFlowLoopInstance(t *testing.T) { json := `{ "name": "测试to flow", "start": { "params":{ "name":{"type":"string","value":"menghui"}, "age":{"type":"number","value":41} }, "variables": { "var_a": { "type": "string", "value": "test var" }, "var_b": { "type": "number", "value": 12 } }, "flow": [{ "gate":"loop", "assign":["a=1"], "step":["a=a+1"], "while": "a<10", "do":[{ "style" : "stdout" }] }] }}` fl, err := NewFlowInstanceFromJSON(json) if err != nil { fmt.Println(err) return } r := map[string]interface{}{ "name": "menghui2", } err = fl.Execute(r) if err != nil { fmt.Println(err) t.Fail() } }
TestNewFlowToInstance
service_status.rs
// Generated from definition io.k8s.api.core.v1.ServiceStatus /// ServiceStatus represents the current status of a service. #[derive(Clone, Debug, Default, PartialEq)] pub struct ServiceStatus { /// Current service state pub conditions: Option<Vec<crate::apimachinery::pkg::apis::meta::v1::Condition>>, /// LoadBalancer contains the current status of the load-balancer, if one is present. pub load_balancer: Option<crate::api::core::v1::LoadBalancerStatus>, } impl<'de> crate::serde::Deserialize<'de> for ServiceStatus { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_conditions, Key_load_balancer, Other, } impl<'de> crate::serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("field identifier") } fn
<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error { Ok(match v { "conditions" => Field::Key_conditions, "loadBalancer" => Field::Key_load_balancer, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = ServiceStatus; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("ServiceStatus") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> { let mut value_conditions: Option<Vec<crate::apimachinery::pkg::apis::meta::v1::Condition>> = None; let mut value_load_balancer: Option<crate::api::core::v1::LoadBalancerStatus> = None; while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? { match key { Field::Key_conditions => value_conditions = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_load_balancer => value_load_balancer = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(ServiceStatus { conditions: value_conditions, load_balancer: value_load_balancer, }) } } deserializer.deserialize_struct( "ServiceStatus", &[ "conditions", "loadBalancer", ], Visitor, ) } } impl crate::serde::Serialize for ServiceStatus { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer { let mut state = serializer.serialize_struct( "ServiceStatus", self.conditions.as_ref().map_or(0, |_| 1) + self.load_balancer.as_ref().map_or(0, |_| 1), )?; if let Some(value) = &self.conditions { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "conditions", value)?; } if let Some(value) = &self.load_balancer { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "loadBalancer", value)?; } crate::serde::ser::SerializeStruct::end(state) } } #[cfg(feature = "schemars")] impl crate::schemars::JsonSchema for ServiceStatus { fn schema_name() -> String { "io.k8s.api.core.v1.ServiceStatus".to_owned() } fn json_schema(__gen: &mut crate::schemars::gen::SchemaGenerator) -> crate::schemars::schema::Schema { crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("ServiceStatus represents the current status of a service.".to_owned()), ..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Object))), object: Some(Box::new(crate::schemars::schema::ObjectValidation { properties: IntoIterator::into_iter([ ( "conditions".to_owned(), crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("Current service state".to_owned()), ..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Array))), array: Some(Box::new(crate::schemars::schema::ArrayValidation { items: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(__gen.subschema_for::<crate::apimachinery::pkg::apis::meta::v1::Condition>()))), ..Default::default() })), ..Default::default() }), ), ( "loadBalancer".to_owned(), { let mut schema_obj = __gen.subschema_for::<crate::api::core::v1::LoadBalancerStatus>().into_object(); schema_obj.metadata = 
Some(Box::new(crate::schemars::schema::Metadata { description: Some("LoadBalancer contains the current status of the load-balancer, if one is present.".to_owned()), ..Default::default() })); crate::schemars::schema::Schema::Object(schema_obj) }, ), ]).collect(), ..Default::default() })), ..Default::default() }) } }
visit_str
_line.py
import _plotly_utils.basevalidators class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__( self, plotly_name='line', parent_name='scattergeo.marker', **kwargs ): super(LineValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, data_class_str='Line', data_docs=""" autocolorscale Determines whether the colorscale is a default palette (`autocolorscale: true`) or the palette determined by `marker.line.colorscale`. Has an effect only if in `marker.line.color`is set to a numerical array. In case `colorscale` is unspecified or `autocolorscale` is true, the default palette will be chosen according to whether numbers in the `color` array are all positive, all negative or mixed. cauto Determines whether or not the color domain is computed with respect to the input data (here in `marker.line.color`) or the bounds set in `marker.line.cmin` and `marker.line.cmax` Has an effect only if in `marker.line.color`is set to a numerical array. Defaults to `false` when `marker.line.cmin` and `marker.line.cmax` are set by the user. cmax Sets the upper bound of the color domain. Has an effect only if in `marker.line.color`is set to a numerical array. Value should have the same units as in `marker.line.color` and if set, `marker.line.cmin` must be set as well. cmin Sets the lower bound of the color domain. Has an effect only if in `marker.line.color`is set to a numerical array. Value should have the same units as in `marker.line.color` and if set, `marker.line.cmax` must be set as well. color Sets themarker.linecolor. It accepts either a specific color or an array of numbers that are mapped to the colorscale relative to the max and min values of the array or relative to `marker.line.cmin` and `marker.line.cmax` if set. colorscale Sets the colorscale. Has an effect only if in `marker.line.color`is set to a numerical array. The colorscale must be an array containing arrays mapping a normalized value to an rgb, rgba, hex, hsl, hsv, or named color string. At minimum, a mapping for the lowest (0) and highest (1) values are required. For example, `[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`. To control the bounds of the colorscale in color space, use`marker.line.cmin` and `marker.line.cmax`. Alternatively, `colorscale` may be a palette name string of the following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black body,Earth,Electric,Viridis,Cividis. colorsrc Sets the source reference on plot.ly for color . reversescale Reverses the color mapping if true. Has an effect only if in `marker.line.color`is set to a numerical array. If true, `marker.line.cmin` will correspond to the last color in the array and `marker.line.cmax` will correspond to the first color. width Sets the width (in px) of the lines bounding the marker points. widthsrc Sets the source reference on plot.ly for width . """, **kwargs )
lib.rs
extern crate chrono; #[macro_use] extern crate log; #[macro_use] extern crate error_chain; extern crate indextree; extern crate libc; extern crate nix; extern crate serde; extern crate spawn_ptrace; mod errors; pub use errors::*; use chrono::{Duration, Local, DateTime}; use indextree::{Arena, NodeId}; pub use indextree::NodeEdge; use libc::{c_long, pid_t}; use nix::c_void; use nix::sys::ptrace::{ptrace, ptrace_setoptions}; use nix::sys::ptrace::ptrace::{PTRACE_EVENT_FORK, PTRACE_EVENT_VFORK, PTRACE_EVENT_CLONE, PTRACE_EVENT_EXEC}; use nix::sys::ptrace::ptrace::{PTRACE_O_TRACECLONE, PTRACE_O_TRACEEXEC, PTRACE_O_TRACEFORK, PTRACE_O_TRACEVFORK, PTRACE_GETEVENTMSG, PTRACE_CONT}; use nix::sys::signal; use nix::sys::wait::{waitpid, WaitStatus}; use serde::{Serialize, Serializer}; use serde::ser::{SerializeSeq, SerializeStruct}; use spawn_ptrace::CommandPtraceSpawn; use std::collections::HashMap; use std::fs::File; use std::io::Read; use std::process::Command; use std::ptr; use std::time::Instant; /// Information about a spawned process. pub struct ProcessInfo { /// The process ID. pub pid: pid_t, /// When the process was started. pub started: Instant, /// When the process ended, or `None` if it is still running. pub ended: Option<Instant>, /// The commandline with which this process was executed. pub cmdline: Vec<String>, } impl Default for ProcessInfo { fn default() -> ProcessInfo { ProcessInfo { pid: 0, started: Instant::now(), ended: None, cmdline: vec!(), } } } /// A tree of processes. pub struct ProcessTree { arena: Arena<ProcessInfo>, pids: HashMap<pid_t, NodeId>, root: NodeId, started: DateTime<Local>, } impl ProcessTree { /// Execute `cmd`, tracking all child processes it spawns, and return a `ProcessTree` listing /// them. pub fn spawn<T>(mut cmd: Command, cmdline: &[T]) -> Result<ProcessTree> where T: AsRef<str> { let started = Local::now(); let child = cmd.spawn_ptrace().chain_err(|| "Error spawning process")?; let pid = child.id() as pid_t; trace!("Spawned process {}", pid); // Setup our ptrace options ptrace_setoptions(pid, PTRACE_O_TRACEEXEC | PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE).chain_err(|| "Error setting ptrace options")?; let mut arena = Arena::new(); let mut pids = HashMap::new(); let root = get_or_insert_pid(pid, &mut arena, &mut pids); arena[root].data.cmdline = cmdline.iter().map(|s| s.as_ref().to_string()).collect(); continue_process(pid, None).chain_err(|| "Error continuing process")?; loop { if !root.descendants(&arena).any(|node| arena[node].data.ended.is_none()) { break } match waitpid(-1, None) { Ok(WaitStatus::Exited(pid, ret)) => { trace!("Process {} exited with status {}", pid, ret); let node = get_or_insert_pid(pid, &mut arena, &mut pids); arena[node].data.ended = Some(Instant::now()); } Ok(WaitStatus::Signaled(pid, sig, _)) => { trace!("Process {} exited with signal {:?}", pid, sig); let node = get_or_insert_pid(pid, &mut arena, &mut pids); arena[node].data.ended = Some(Instant::now()); } Ok(WaitStatus::PtraceEvent(pid, _sig, event)) => { match event { PTRACE_EVENT_FORK | PTRACE_EVENT_VFORK | PTRACE_EVENT_CLONE => { let mut new_pid: pid_t = 0; ptrace(PTRACE_GETEVENTMSG, pid, ptr::null_mut(), &mut new_pid as *mut pid_t as *mut c_void) .chain_err(|| "Failed to get pid of forked process")?; let name = match event { PTRACE_EVENT_FORK => "fork", PTRACE_EVENT_VFORK => "vfork", PTRACE_EVENT_CLONE => "clone", _ => unreachable!(), }; trace!("[{}] {} new process {}", pid, name, new_pid); match pids.get(&pid) { Some(&parent) => { let cmdline = { 
let parent_data = &arena[parent].data; if parent_data.cmdline.len() > 1 { parent_data.cmdline[..1].to_vec() } else { vec![] } }; let child = get_or_insert_pid(new_pid, &mut arena, &mut pids); arena[child].data.cmdline = cmdline; parent.append(child, &mut arena); } None => bail!("Got an {:?} event for unknown parent pid {}", event, pid), } } PTRACE_EVENT_EXEC => { let mut buf = vec!(); match pids.get(&pid) { Some(&node) => { File::open(format!("/proc/{}/cmdline", pid)) .and_then(|mut f| f.read_to_end(&mut buf)) .and_then(|_| { let mut cmdline = buf.split(|&b| b == 0).map(|bytes| String::from_utf8_lossy(bytes).into_owned()).collect::<Vec<_>>(); cmdline.pop(); debug!("[{}] exec {:?}", pid, cmdline); arena[node].data.cmdline = cmdline; Ok(()) }) .chain_err(|| "Couldn't read cmdline")?; } None => bail!("Got an exec event for unknown pid {}", pid), } } _ => panic!("Unexpected ptrace event: {:?}", event), } continue_process(pid, None).chain_err(|| "Error continuing process")?; } Ok(WaitStatus::Stopped(pid, sig)) => { trace!("[{}] stopped with {:?}", pid, sig); // Sometimes we get the SIGSTOP+exit from a child before we get the clone // stop from the parent, so insert any unknown pids here so we have a better // approximation of the process start time. get_or_insert_pid(pid, &mut arena, &mut pids); let continue_sig = if sig == signal::Signal::SIGSTOP { None } else { Some(sig) }; continue_process(pid, continue_sig).chain_err(|| "Error continuing process")?; } Ok(s) => bail!("Unexpected process status: {:?}", s), Err(e) => { match e { nix::Error::Sys(nix::Errno::EINTR) => { /*FIXME if SIGNAL_DELIVERED.swap(false, Ordering::Relaxed) { println!("Active processes:"); print_process_tree(root, arena, |info| info.ended.is_none()); } */ } _ => bail!("ptrace error: {:?}", e), } } } } Ok(ProcessTree { arena: arena, pids: pids, root: root, started: started, }) } /// Iterate over processes in the tree in tree order. pub fn traverse<'a>(&'a self) -> Traverse<'a> { Traverse { inner: self.root.traverse(&self.arena), arena: &self.arena, } } /// Look up a process in the tree by pid. 
pub fn get(&self, pid: pid_t) -> Option<&ProcessInfo> { match self.pids.get(&pid) { None => None, Some(&node) => Some(&self.arena[node].data), } } } pub struct Traverse<'a> { inner: indextree::Traverse<'a, ProcessInfo>, arena: &'a Arena<ProcessInfo>, } impl<'a> Iterator for Traverse<'a> { type Item = NodeEdge<&'a ProcessInfo>; fn next(&mut self) -> Option<NodeEdge<&'a ProcessInfo>> { match self.inner.next() { None => None, Some(NodeEdge::Start(node)) => { Some(NodeEdge::Start(&self.arena[node].data)) } Some(NodeEdge::End(node)) => { Some(NodeEdge::End(&self.arena[node].data)) } } } } struct ProcessInfoSerializable<'a>(NodeId, &'a Arena<ProcessInfo>, Instant, DateTime<Local>); struct ChildrenSerializable<'a>(NodeId, &'a Arena<ProcessInfo>, Instant, DateTime<Local>); fn dt(a: Instant, b: Instant, c: DateTime<Local>) -> String { let d = c + Duration::from_std(a - b).unwrap(); d.to_rfc3339() } impl<'a> Serialize for ProcessInfoSerializable<'a> { fn serialize<S>(&self, serializer: S) -> ::std::result::Result<S::Ok, S::Error> where S: Serializer { let mut state = serializer.serialize_struct("ProcessInfo", 5)?; { let info = &self.1[self.0].data; state.serialize_field("pid", &info.pid)?; state.serialize_field("started", &dt(info.started, self.2, self.3))?; state.serialize_field("ended", &info.ended.map(|i| dt(i, self.2, self.3)))?; state.serialize_field("cmdline", &info.cmdline)?; } state.serialize_field("children", &ChildrenSerializable(self.0, self.1, self.2, self.3))?; state.end() } } impl<'a> Serialize for ChildrenSerializable<'a> { fn serialize<S>(&self, serializer: S) -> ::std::result::Result<S::Ok, S::Error> where S: Serializer { let len = self.0.children(self.1).count(); let mut seq = serializer.serialize_seq(Some(len))?; for c in self.0.children(self.1) { seq.serialize_element(&ProcessInfoSerializable(c, self.1, self.2, self.3))?; } seq.end() } } impl Serialize for ProcessTree { fn serialize<S>(&self, serializer: S) -> ::std::result::Result<S::Ok, S::Error> where S: Serializer { let started = self.arena[self.root].data.started; let root_pi = ProcessInfoSerializable(self.root, &self.arena, started, self.started); root_pi.serialize(serializer) } } fn get_or_insert_pid(pid: pid_t, arena: &mut Arena<ProcessInfo>, map: &mut HashMap<pid_t, NodeId>) -> NodeId { *map.entry(pid).or_insert_with(|| { arena.new_node(ProcessInfo { pid: pid, .. ProcessInfo::default() }) }) } fn
(pid: pid_t, signal: Option<signal::Signal>) -> nix::Result<c_long> { let data = signal.map(|s| s as i32 as *mut c_void).unwrap_or(ptr::null_mut()); ptrace(PTRACE_CONT, pid, ptr::null_mut(), data) }
continue_process
reference_parser.rs
use crate::models::Location; use crate::parsers::Parser; pub struct ReferenceParser { prefix: Option<Option<()>>, reference: Option<ReferenceParserPayload>, references: Vec<ReferenceParserPayload>, suffix: Option<Option<()>>, location: Location, done: bool, } impl ReferenceParser { pub fn new() -> Self { Self { prefix: None, reference: None, references: vec![], suffix: None, location: Location::default(), done: false, } } pub fn call(self) -> Vec<ReferenceParserPayload> { self.references } } impl Default for ReferenceParser { fn default() -> Self { Self::new() } } impl Parser for ReferenceParser { fn next(&mut self, c: char) { self.location.next(c); if c == '[' { self.location.in_range(); } match self.prefix { None => match c { '[' => self.prefix = Some(None), _ => { self.location.resume(); return; } }, Some(prefix) => match prefix { None => match c { '[' => self.prefix = Some(Some(())), _ => { self.location.resume(); self.prefix = None; } }, Some(_) => match &mut self.reference { None => match c { ' ' => return, '\n' => self.prefix = None, c => { self.reference = Some(ReferenceParserPayload::from(c, self.location.clone())) } }, Some(payload) => match c { '\n' => { self.prefix = None; self.suffix = None; self.reference = None; } ']' => { payload.location.next(c); match self.suffix { None => self.suffix = Some(None), Some(_) => self.done = true, } } c => { payload.push(c); payload.location.next(c); } }, }, }, } if self.done { self.prefix = None; self.suffix = None; self.references.push(self.reference.take().unwrap()); self.location.resume(); self.done = false } } } #[derive(Debug)] pub struct ReferenceParserPayload { pub header: String, pub location: Location, } impl ReferenceParserPayload { fn from(c: char, location: Location) -> Self { Self { header: String::from(c), location, } } fn push(&mut self, c: char) { self.header.push(c); } } #[cfg(test)] mod tests { use super::*; #[test] fn test_reference_parser_new() { assert!(!ReferenceParser::new().done); } #[test] fn test_reference_parser_default() { assert!(!ReferenceParser::default().done); } #[test] fn test_reference_parser_location_char_position_no_newline() { let mut parser = ReferenceParser::new(); parser.next('a'); assert_eq!(parser.location.line_position, 0); assert_eq!(parser.location.start_char_position, 0); assert_eq!(parser.location.end_char_position, 0); } #[test] fn test_reference_parser_location_char_position_newline() { let mut parser = ReferenceParser::new(); parser.next('\n'); assert_eq!(parser.location.line_position, 1); assert_eq!(parser.location.start_char_position, -1); assert_eq!(parser.location.end_char_position, -1); parser.next('a'); assert_eq!(parser.location.line_position, 1); assert_eq!(parser.location.start_char_position, 0); assert_eq!(parser.location.end_char_position, 0); } #[test] fn test_reference_parser_location_in_range() { let mut parser = ReferenceParser::new(); parser.next('['); parser.next('['); parser.next('a'); parser.next(']'); assert!(parser.location.in_range); } #[test] fn test_reference_parser_location_out_of_range() { let mut parser = ReferenceParser::new(); parser.next('['); parser.next('['); parser.next('a'); parser.next(']'); parser.next(']'); assert!(!parser.location.in_range); } #[test] fn test_reference_parser_next_initial_state() { let mut parser = ReferenceParser::new(); parser.next('\n'); assert!(!parser.done); assert!(parser.prefix.is_none());
assert!(parser.references.is_empty()); } #[test] fn test_reference_parser_next_prefix_first_state() { let mut parser = ReferenceParser::new(); parser.next('['); assert!(!parser.done); assert!(parser.prefix.is_some()); assert!(parser.prefix.unwrap().is_none()); assert!(parser.suffix.is_none()); assert!(parser.reference.is_none()); assert!(parser.references.is_empty()); parser.next('\n'); assert!(!parser.done); assert!(parser.prefix.is_none()); assert!(parser.suffix.is_none()); assert!(parser.reference.is_none()); assert!(parser.references.is_empty()); } #[test] fn test_reference_parser_next_prefix_final_state() { let mut parser = ReferenceParser::new(); parser.next('['); parser.next('['); assert!(!parser.done); assert!(parser.prefix.is_some()); assert!(parser.prefix.unwrap().is_some()); assert!(parser.suffix.is_none()); assert!(parser.reference.is_none()); assert!(parser.references.is_empty()); parser.next('\n'); assert!(!parser.done); assert!(parser.prefix.is_none()); assert!(parser.suffix.is_none()); assert!(parser.reference.is_none()); assert!(parser.references.is_empty()); } #[test] fn test_reference_parser_next_reference_capture_state() { let mut parser = ReferenceParser::new(); parser.next('['); parser.next('['); parser.next('a'); parser.next('b'); parser.next('c'); assert!(!parser.done); assert!(parser.prefix.is_some()); assert!(parser.prefix.unwrap().is_some()); assert!(parser.suffix.is_none()); assert!(parser.reference.is_some()); assert_eq!( parser.reference.as_ref().unwrap().header, String::from("abc") ); assert!(parser.references.is_empty()); parser.next('\n'); assert!(!parser.done); assert!(parser.prefix.is_none()); assert!(parser.suffix.is_none()); assert!(parser.reference.is_none()); assert!(parser.references.is_empty()); } #[test] fn test_reference_parser_next_suffix_first_state() { let mut parser = ReferenceParser::new(); parser.next('['); parser.next('['); parser.next('a'); parser.next('b'); parser.next('c'); parser.next(']'); assert!(!parser.done); assert!(parser.prefix.is_some()); assert!(parser.prefix.unwrap().is_some()); assert!(parser.reference.is_some()); assert_eq!( parser.reference.as_ref().unwrap().header, String::from("abc") ); assert!(parser.references.is_empty()); assert!(parser.suffix.is_some()); assert!(parser.suffix.unwrap().is_none()); parser.next('\n'); assert!(!parser.done); assert!(parser.prefix.is_none()); assert!(parser.suffix.is_none()); assert!(parser.reference.is_none()); assert!(parser.references.is_empty()); } #[test] fn test_reference_parser_next_suffix_final_state() { let mut parser = ReferenceParser::new(); parser.next('['); parser.next('['); parser.next('a'); parser.next('b'); parser.next('c'); parser.next(']'); parser.next(']'); assert!(!parser.done); assert!(parser.prefix.is_none()); assert!(parser.reference.is_none()); assert_eq!(parser.references.len(), 1); assert_eq!( parser.references.first().unwrap().header, String::from("abc") ); assert!(parser.suffix.is_none()); parser.next('\n'); assert!(!parser.done); assert!(parser.prefix.is_none()); assert!(parser.reference.is_none()); assert_eq!(parser.references.len(), 1); assert_eq!( parser.references.first().unwrap().header, String::from("abc") ); assert!(parser.suffix.is_none()); } #[test] fn test_reference_parser_call_in_non_final_state() { let mut parser = ReferenceParser::new(); parser.next('['); parser.next('['); parser.next('a'); parser.next('b'); parser.next('c'); parser.next(']'); assert!(parser.call().is_empty()); } #[test] fn test_reference_parser_call_in_final_state() { let mut 
parser = ReferenceParser::new(); parser.next('['); parser.next('['); parser.next('a'); parser.next('b'); parser.next('c'); parser.next(']'); parser.next(']'); assert!(!parser.call().is_empty()); } }
assert!(parser.suffix.is_none()); assert!(parser.reference.is_none());
service_check.py
#!/usr/bin/env python """ Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from resource_management import * import socket import sys import time import subprocess from hcat_service_check import hcat_service_check from webhcat_service_check import webhcat_service_check from ambari_commons import OSConst from ambari_commons.os_family_impl import OsFamilyImpl from resource_management.core import shell from resource_management.core.logger import Logger from resource_management.libraries.functions import get_unique_id_and_date class HiveServiceCheck(Script): pass @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY) class HiveServiceCheckWindows(HiveServiceCheck): def service_check(self, env): import params env.set_params(params) smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd") service = "HIVE" Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hive_user, logoutput=True) hcat_service_check() webhcat_service_check() @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT) class HiveServiceCheckDefault(HiveServiceCheck): def __init__(self): super(HiveServiceCheckDefault, self).__init__() Logger.initialize_logger() def service_check(self, env): import params env.set_params(params) if params.security_enabled: kinit_cmd = format( "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ") else: kinit_cmd = "" # Check HiveServer Logger.info("Running Hive Server checks") Logger.info("--------------------------\n") self.check_hive_server(env, 'Hive Server', kinit_cmd, params.hive_server_hosts, int(format("{hive_server_port}"))) if params.has_hive_interactive and params.hive_interactive_enabled: Logger.info("Running Hive Server2 checks") Logger.info("--------------------------\n") self.check_hive_server(env, 'Hive Server2', kinit_cmd, params.hive_interactive_hosts, int(format("{hive_server_interactive_port}"))) Logger.info("Running LLAP checks") Logger.info("-------------------\n") self.check_llap(env, kinit_cmd) Logger.info("Running HCAT checks") Logger.info("-------------------\n") hcat_service_check() Logger.info("Running WEBHCAT checks") Logger.info("---------------------\n") webhcat_service_check() def check_hive_server(self, env, server_component_name, kinit_cmd, address_list, server_port):
def check_llap(self, env, kinit_cmd): import params env.set_params(params) File(format("{tmp_dir}/hiveLlapSmoke.sh"), content=StaticFile("hiveLlapSmoke.sh"), mode=0755 ) unique_id = get_unique_id_and_date() llap_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hiveLlapSmoke.sh {stack_root} llap_smoke_{unique_id} prepare") exec_path = params.execute_path if params.version and params.stack_root: upgrade_hive_bin = format("{stack_root}/{version}/hive2/bin") exec_path = os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin Execute(llap_cmd, user=params.hive_user, path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path], tries=1, try_sleep=5, wait_for_finish=True, stderr=subprocess.PIPE, logoutput=True) if __name__ == "__main__": HiveServiceCheck().execute()
    import params
    env.set_params(params)

    Logger.info("Server Address List : {0}, Port : {1}".format(address_list, server_port))

    if not address_list:
      raise Fail("Cannot find any " + server_component_name + " host. Please check configuration.")

    SOCKET_WAIT_SECONDS = 290

    start_time = time.time()
    end_time = start_time + SOCKET_WAIT_SECONDS

    Logger.info("Waiting for the {0} to start...".format(server_component_name))

    workable_server_available = False
    i = 0
    while time.time() < end_time and not workable_server_available:
      address = address_list[i]
      try:
        check_thrift_port_sasl(address, server_port, params.hive_server2_authentication,
                               params.hive_server_principal, kinit_cmd, params.smokeuser,
                               transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
                               ssl=params.hive_ssl, ssl_keystore=params.hive_ssl_keystore_path,
                               ssl_password=params.hive_ssl_keystore_password)
        Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
        workable_server_available = True
      except:
        Logger.info("Connection to {0} on port {1} failed".format(address, server_port))
        time.sleep(5)

      i += 1
      if i == len(address_list):
        i = 0

    elapsed_time = time.time() - start_time

    if not workable_server_available:
      raise Fail("Connection to '{0}' on host: {1} and port {2} failed after {3} seconds"
                 .format(server_component_name, params.hostname, server_port, elapsed_time))

    Logger.info("Successfully stayed connected to '{0}' on host: {1} and port {2} after {3} seconds"
                .format(server_component_name, params.hostname, server_port, elapsed_time))
credentials.go
/* Package sdk is the gRPC implementation of the SDK gRPC server Copyright 2018 Portworx Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package sdk import ( "context" "fmt" "github.com/golang/protobuf/jsonpb" "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/pkg/auth" "github.com/libopenstorage/openstorage/volume" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) // CredentialServer is an implementation of the gRPC OpenStorageCredential interface type CredentialServer struct { server serverAccessor } func (s *CredentialServer) driver(ctx context.Context) volume.VolumeDriver { return s.server.driver(ctx) } // Create method creates credentials func (s *CredentialServer) Create( ctx context.Context, req *api.SdkCredentialCreateRequest, ) (*api.SdkCredentialCreateResponse, error) { if s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetName()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply a name") } else if aws := req.GetAwsCredential(); aws != nil { return s.awsCreate(ctx, req, aws) } else if azure := req.GetAzureCredential(); azure != nil { return s.azureCreate(ctx, req, azure) } else if google := req.GetGoogleCredential(); google != nil { return s.googleCreate(ctx, req, google) } return nil, status.Error(codes.InvalidArgument, "Unknown credential type") } func (s *CredentialServer) awsCreate( ctx context.Context, req *api.SdkCredentialCreateRequest, aws *api.SdkAwsCredentialRequest, ) (*api.SdkCredentialCreateResponse, error) { if len(aws.GetAccessKey()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply Access Key") } if len(aws.GetSecretKey()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply Secret Key") } if len(aws.GetRegion()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply Region Key") } if len(aws.GetEndpoint()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply Endpoint Key") } params := make(map[string]string) params[api.OptCredType] = "s3" params[api.OptCredName] = req.GetName() params[api.OptCredEncrKey] = req.GetEncryptionKey() params[api.OptCredBucket] = req.GetBucket() params[api.OptCredRegion] = aws.GetRegion() params[api.OptCredEndpoint] = aws.GetEndpoint() params[api.OptCredAccessKey] = aws.GetAccessKey() params[api.OptCredSecretKey] = aws.GetSecretKey() params[api.OptCredDisableSSL] = fmt.Sprintf("%v", aws.GetDisableSsl()) uuid, err := s.create(ctx, req, params) if err != nil { return nil, status.Errorf( codes.Internal, "failed to create aws credentials: %v", err.Error()) } err = validateAndDeleteIfInvalid(ctx, s, uuid) if err != nil { return nil, err } return &api.SdkCredentialCreateResponse{CredentialId: uuid}, nil } func (s *CredentialServer) azureCreate( ctx context.Context, req *api.SdkCredentialCreateRequest, azure *api.SdkAzureCredentialRequest, ) (*api.SdkCredentialCreateResponse, error) { if len(azure.GetAccountKey()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply Account 
Key") } if len(azure.GetAccountName()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply Account name") } params := make(map[string]string) params[api.OptCredType] = "azure" params[api.OptCredName] = req.GetName() params[api.OptCredEncrKey] = req.GetEncryptionKey() params[api.OptCredBucket] = req.GetBucket() params[api.OptCredAzureAccountKey] = azure.GetAccountKey() params[api.OptCredAzureAccountName] = azure.GetAccountName() uuid, err := s.create(ctx, req, params) if err != nil { return nil, status.Errorf( codes.Internal, "failed to create Azure credentials: %v", err.Error()) } err = validateAndDeleteIfInvalid(ctx, s, uuid) if err != nil { return nil, err } return &api.SdkCredentialCreateResponse{CredentialId: uuid}, nil } func (s *CredentialServer) googleCreate( ctx context.Context, req *api.SdkCredentialCreateRequest, google *api.SdkGoogleCredentialRequest, ) (*api.SdkCredentialCreateResponse, error) { if len(google.GetJsonKey()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply JSON Key") } if len(google.GetProjectId()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must supply Project ID") } params := make(map[string]string) params[api.OptCredType] = "google" params[api.OptCredName] = req.GetName() params[api.OptCredEncrKey] = req.GetEncryptionKey() params[api.OptCredBucket] = req.GetBucket() params[api.OptCredGoogleProjectID] = google.GetProjectId() params[api.OptCredGoogleJsonKey] = google.GetJsonKey() uuid, err := s.create(ctx, req, params) if err != nil { return nil, status.Errorf( codes.Internal, "failed to create Google credentials: %v", err.Error()) } err = validateAndDeleteIfInvalid(ctx, s, uuid) if err != nil { return nil, err } return &api.SdkCredentialCreateResponse{CredentialId: uuid}, nil } // Validate validates a specified Credential. 
func (s *CredentialServer) Validate( ctx context.Context, req *api.SdkCredentialValidateRequest, ) (*api.SdkCredentialValidateResponse, error) { if s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetCredentialId()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must provide credentials uuid") } // Check ownership _, err := s.Inspect(ctx, &api.SdkCredentialInspectRequest{ CredentialId: req.GetCredentialId(), }) if err != nil { return nil, err } err = s.driver(ctx).CredsValidate(req.GetCredentialId()) if err != nil { return nil, status.Errorf( codes.Internal, "failed to validate credentials: %v", err.Error()) } return &api.SdkCredentialValidateResponse{}, nil } // Delete deletes a specified credential func (s *CredentialServer) Delete( ctx context.Context, req *api.SdkCredentialDeleteRequest, ) (*api.SdkCredentialDeleteResponse, error) { if s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetCredentialId()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must provide credentials uuid") } // Check ownership resp, err := s.Inspect(ctx, &api.SdkCredentialInspectRequest{ CredentialId: req.GetCredentialId(), }) // This checks at least for READ access type to credential if err != nil { return nil, err } // This checks for admin access type to credential to be able to delete it if !resp.GetOwnership().IsPermittedByContext(ctx, api.Ownership_Admin) { return nil, status.Errorf( codes.PermissionDenied, "Only admin access type to credential is allowed to delete %v", req.GetCredentialId()) } err = s.driver(ctx).CredsDelete(req.GetCredentialId()) if err != nil { return nil, status.Errorf( codes.Internal, "failed to delete credentials: %v", err.Error()) } return &api.SdkCredentialDeleteResponse{}, nil } // Enumerate returns a list credentials ids func (s *CredentialServer) Enumerate( ctx context.Context, req *api.SdkCredentialEnumerateRequest, ) (*api.SdkCredentialEnumerateResponse, error) { if s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } credList, err := s.driver(ctx).CredsEnumerate() if err != nil { return nil, status.Errorf( codes.Internal, "Unable to enumerate credentials AWS: %v", err.Error()) } ids := make([]string, 0) for credId, cred := range credList { if s.isPermitted(ctx, api.Ownership_Read, cred) { ids = append(ids, credId) } } return &api.SdkCredentialEnumerateResponse{ CredentialIds: ids, }, nil } // Inspect returns information about credential id func (s *CredentialServer) Inspect( ctx context.Context, req *api.SdkCredentialInspectRequest, ) (*api.SdkCredentialInspectResponse, error) { if s.driver(ctx) == nil { return nil, status.Error(codes.Unavailable, "Resource has not been initialized") } if len(req.GetCredentialId()) == 0 { return nil, status.Error(codes.InvalidArgument, "Must provide a credential id") } credList, err := s.driver(ctx).CredsEnumerate() if err != nil { return nil, status.Errorf( codes.Internal, "Unable to enumerate credentials: %v", err.Error()) } cred, ok := credList[req.GetCredentialId()] if !ok { return nil, status.Errorf(codes.NotFound, "Credential id %s not found", req.GetCredentialId()) } info, ok := cred.(map[string]interface{}) if !ok { return nil, status.Error(codes.Internal, "Unable to get credential id information") } // Check ownership if !s.isPermitted(ctx, api.Ownership_Read, cred) { return nil, status.Errorf(codes.PermissionDenied, "Access 
denied to %s", req.GetCredentialId()) } credName, ok := info[api.OptCredName].(string) if !ok { // The code to support names may not be available credName = "" } bucket, ok := info[api.OptCredBucket].(string) if !ok { // The code to support bucket may not be available bucket = "" } // Get ownership ownership, err := s.getOwnershipFromCred(cred) if err != nil { return nil, err } resp := &api.SdkCredentialInspectResponse{ CredentialId: req.GetCredentialId(), Name: credName, Bucket: bucket, Ownership: ownership, } switch info[api.OptCredType] { case "s3": accessKey, ok := info[api.OptCredAccessKey].(string) if !ok { return nil, status.Error(codes.Internal, "Unable to parse accessKey") } endpoint, ok := info[api.OptCredEndpoint].(string) if !ok { return nil, status.Error(codes.Internal, "Unable to parse endpoint") } region, ok := info[api.OptCredRegion].(string) if !ok { return nil, status.Error(codes.Internal, "Unable to parse region") } disableSsl, ok := info[api.OptCredDisableSSL].(string) if !ok { return nil, status.Error(codes.Internal, "Unable to parse disabling ssl was requested") } resp.CredentialType = &api.SdkCredentialInspectResponse_AwsCredential{ AwsCredential: &api.SdkAwsCredentialResponse{ AccessKey: accessKey, Endpoint: endpoint, Region: region, DisableSsl: disableSsl == "true", }, } case "azure": accountName, ok := info[api.OptCredAzureAccountName].(string) if !ok { return nil, status.Error(codes.Internal, "Unable to parse account name") } resp.CredentialType = &api.SdkCredentialInspectResponse_AzureCredential{ AzureCredential: &api.SdkAzureCredentialResponse{ AccountName: accountName, }, } case "google": projectId, ok := info[api.OptCredGoogleProjectID].(string) if !ok { return nil, status.Error(codes.Internal, "Unable to parse project id") } resp.CredentialType = &api.SdkCredentialInspectResponse_GoogleCredential{ GoogleCredential: &api.SdkGoogleCredentialResponse{ ProjectId: projectId, }, } default: return nil, status.Errorf( codes.Internal, "Received unknown credential type of %s", info[api.OptCredType]) } return resp, nil } func (s *CredentialServer) create( ctx context.Context, req *api.SdkCredentialCreateRequest, params map[string]string) (string, error) { if params == nil || req == nil { return "", fmt.Errorf("params and/or request is nil and cannot create credentials") } // Add user as owner ownership := api.OwnershipSetUsernameFromContext(ctx, req.GetOwnership()) if ownership != nil { // Encode ownership in params m := jsonpb.Marshaler{OrigName: true} ownershipString, err := m.MarshalToString(ownership) if err != nil { return "", fmt.Errorf("failed to marshal ownership: %v", err) } params[api.OptCredOwnership] = ownershipString } return s.driver(ctx).CredsCreate(params) } func (s *CredentialServer) getOwnershipFromCred(cred interface{}) (*api.Ownership, error) { info, ok := cred.(map[string]interface{}) if !ok { return nil, status.Error(codes.Internal, "Unable to get credential id information") } // Get ownership var ownership *api.Ownership ownershipString, ok := info[api.OptCredOwnership].(string) if ok { if len(ownershipString) == 0 { return nil, nil } ownership = &api.Ownership{} err := jsonpb.UnmarshalString(ownershipString, ownership) if err != nil { return nil, status.Errorf( codes.Internal, "Failed to retreive ownership from credential object: %v", err) } } return ownership, nil } func (s CredentialServer) isPermitted( ctx context.Context, accessType api.Ownership_AccessType, cred interface{}, ) bool { ownership, err := s.getOwnershipFromCred(cred) if err != 
nil { return false } // If ownership is missing then it is also public if ownership == nil || ownership.IsPublic() { return true } if userinfo, ok := auth.NewUserInfoFromContext(ctx); ok { return ownership.IsPermitted(userinfo, accessType) } // Auth is not enabled if there is no user context return true } func validateAndDeleteIfInvalid(ctx context.Context, s *CredentialServer, uuid string) error { // Validate if the credentials provided were correct or not req := &api.SdkCredentialValidateRequest{CredentialId: uuid} validateErr := s.driver(ctx).CredsValidate(req.GetCredentialId()) if validateErr != nil { deleteCred := &api.SdkCredentialDeleteRequest{CredentialId: uuid} err := s.driver(ctx).CredsDelete(deleteCred.GetCredentialId()) if err != nil
return status.Errorf( codes.PermissionDenied, "credentials could not be validated: %v", validateErr.Error()) } return nil }
{ return status.Errorf( codes.Internal, "failed to delete invalid Google credentials: %v", err.Error()) }
generate.go
package main import ( "fmt" "math/rand" "sort" "strings" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" ) var ( // testnetCombinations defines global testnet options, where we generate a // separate testnet for each combination (Cartesian product) of options. testnetCombinations = map[string][]interface{}{ "topology": {"single", "quad", "large"}, "ipv6": {false, true}, "initialHeight": {0, 1000}, "initialState": { map[string]string{}, map[string]string{"initial01": "a", "initial02": "b", "initial03": "c"}, }, "validators": {"genesis", "initchain"}, "keyType": {types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1}, } // The following specify randomly chosen values for testnet nodes. nodeDatabases = uniformChoice{"goleveldb", "cleveldb", "rocksdb", "boltdb", "badgerdb"} // FIXME: grpc disabled due to https://github.com/tendermint/tendermint/issues/5439 nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin"} // "grpc" nodePrivvalProtocols = uniformChoice{"file", "unix", "tcp", "grpc"} // FIXME: v2 disabled due to flake nodeFastSyncs = uniformChoice{"", "v0"} // "v2" nodeStateSyncs = uniformChoice{false, true} nodePersistIntervals = uniformChoice{0, 1, 5} nodeSnapshotIntervals = uniformChoice{0, 3} nodeRetainBlocks = uniformChoice{0, int(e2e.EvidenceAgeHeight), int(e2e.EvidenceAgeHeight) + 5} nodePerturbations = probSetChoice{ "disconnect": 0.1, "pause": 0.1, "kill": 0.1, "restart": 0.1, } evidence = uniformChoice{0, 1, 10} ) // Generate generates random testnets using the given RNG. func Generate(r *rand.Rand) ([]e2e.Manifest, error) { manifests := []e2e.Manifest{} for _, opt := range combinations(testnetCombinations) { manifest, err := generateTestnet(r, opt) if err != nil { return nil, err } manifests = append(manifests, manifest) } return manifests, nil } // generateTestnet generates a single testnet with the given options. func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { manifest := e2e.Manifest{ IPv6: opt["ipv6"].(bool), InitialHeight: int64(opt["initialHeight"].(int)), InitialState: opt["initialState"].(map[string]string), Validators: &map[string]int64{}, ValidatorUpdates: map[string]map[string]int64{}, Nodes: map[string]*e2e.ManifestNode{}, KeyType: opt["keyType"].(string), Evidence: evidence.Choose(r).(int), } var numSeeds, numValidators, numFulls, numLightClients int switch opt["topology"].(string) { case "single": numValidators = 1 case "quad": numValidators = 4 case "large": // FIXME Networks are kept small since large ones use too much CPU. numSeeds = r.Intn(3) numLightClients = r.Intn(3) numValidators = 4 + r.Intn(7) numFulls = r.Intn(5) default: return manifest, fmt.Errorf("unknown topology %q", opt["topology"]) } // First we generate seed nodes, starting at the initial height. for i := 1; i <= numSeeds; i++ { manifest.Nodes[fmt.Sprintf("seed%02d", i)] = generateNode( r, e2e.ModeSeed, 0, manifest.InitialHeight, false) } // Next, we generate validators. We make sure a BFT quorum of validators start // at the initial height, and that we have two archive nodes. We also set up // the initial validator set, and validator set updates for delayed nodes. 
nextStartAt := manifest.InitialHeight + 5 quorum := numValidators*2/3 + 1 for i := 1; i <= numValidators; i++ { startAt := int64(0) if i > quorum { startAt = nextStartAt nextStartAt += 5 } name := fmt.Sprintf("validator%02d", i) manifest.Nodes[name] = generateNode( r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2) if startAt == 0 { (*manifest.Validators)[name] = int64(30 + r.Intn(71)) } else { manifest.ValidatorUpdates[fmt.Sprint(startAt+5)] = map[string]int64{ name: int64(30 + r.Intn(71)), } } } // Move validators to InitChain if specified. switch opt["validators"].(string) { case "genesis": case "initchain": manifest.ValidatorUpdates["0"] = *manifest.Validators manifest.Validators = &map[string]int64{} default: return manifest, fmt.Errorf("invalid validators option %q", opt["validators"]) } // Finally, we generate random full nodes. for i := 1; i <= numFulls; i++ { startAt := int64(0) if r.Float64() >= 0.5 { startAt = nextStartAt nextStartAt += 5 } manifest.Nodes[fmt.Sprintf("full%02d", i)] = generateNode( r, e2e.ModeFull, startAt, manifest.InitialHeight, false) } // We now set up peer discovery for nodes. Seed nodes are fully meshed with // each other, while non-seed nodes either use a set of random seeds or a // set of random peers that start before themselves. var seedNames, peerNames, lightProviders []string for name, node := range manifest.Nodes { if node.Mode == string(e2e.ModeSeed) { seedNames = append(seedNames, name) } else { // if the full node or validator is an ideal candidate, it is added as a light provider. // There are at least two archive nodes so there should be at least two ideal candidates if (node.StartAt == 0 || node.StartAt == manifest.InitialHeight) && node.RetainBlocks == 0 { lightProviders = append(lightProviders, name) } peerNames = append(peerNames, name) } } for _, name := range seedNames { for _, otherName := range seedNames { if name != otherName { manifest.Nodes[name].Seeds = append(manifest.Nodes[name].Seeds, otherName) } } } sort.Slice(peerNames, func(i, j int) bool { iName, jName := peerNames[i], peerNames[j] switch { case manifest.Nodes[iName].StartAt < manifest.Nodes[jName].StartAt: return true case manifest.Nodes[iName].StartAt > manifest.Nodes[jName].StartAt: return false default: return strings.Compare(iName, jName) == -1 } }) for i, name := range peerNames { if len(seedNames) > 0 && (i == 0 || r.Float64() >= 0.5) { manifest.Nodes[name].Seeds = uniformSetChoice(seedNames).Choose(r) } else if i > 0 { manifest.Nodes[name].PersistentPeers = uniformSetChoice(peerNames[:i]).Choose(r) } } // lastly, set up the light clients for i := 1; i <= numLightClients; i++ { startAt := manifest.InitialHeight + 5 manifest.Nodes[fmt.Sprintf("light%02d", i)] = generateLightNode( r, startAt+(5*int64(i)), lightProviders, ) } return manifest, nil } // generateNode randomly generates a node, with some constraints to avoid // generating invalid configurations. We do not set Seeds or PersistentPeers // here, since we need to know the overall network topology and startup // sequencing. 
func generateNode( r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool, ) *e2e.ManifestNode { node := e2e.ManifestNode{ Mode: string(mode), StartAt: startAt, Database: nodeDatabases.Choose(r).(string), ABCIProtocol: nodeABCIProtocols.Choose(r).(string), PrivvalProtocol: nodePrivvalProtocols.Choose(r).(string), FastSync: nodeFastSyncs.Choose(r).(string), StateSync: nodeStateSyncs.Choose(r).(bool) && startAt > 0, PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), RetainBlocks: uint64(nodeRetainBlocks.Choose(r).(int)), Perturb: nodePerturbations.Choose(r), } // If this node is forced to be an archive node, retain all blocks and // enable state sync snapshotting. if forceArchive { node.RetainBlocks = 0 node.SnapshotInterval = 3 } // If a node which does not persist state also does not retain blocks, randomly // choose to either persist state or retain all blocks.
node.RetainBlocks = 0 } else { node.PersistInterval = ptrUint64(node.RetainBlocks) } } // If either PersistInterval or SnapshotInterval are greater than RetainBlocks, // expand the block retention time. if node.RetainBlocks > 0 { if node.PersistInterval != nil && node.RetainBlocks < *node.PersistInterval { node.RetainBlocks = *node.PersistInterval } if node.RetainBlocks < node.SnapshotInterval { node.RetainBlocks = node.SnapshotInterval } } return &node } func generateLightNode(r *rand.Rand, startAt int64, providers []string) *e2e.ManifestNode { return &e2e.ManifestNode{ Mode: string(e2e.ModeLight), StartAt: startAt, Database: nodeDatabases.Choose(r).(string), ABCIProtocol: "builtin", PersistInterval: ptrUint64(0), PersistentPeers: providers, } } func ptrUint64(i uint64) *uint64 { return &i }
if node.PersistInterval != nil && *node.PersistInterval == 0 && node.RetainBlocks > 0 { if r.Float64() > 0.5 {
api_client.go
package api

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

var SOURCE_URL string = "http://192.168.0.22:8000"
var DESTINATION_URL string = "http://192.168.0.23:8000"
var IsDev bool = false

type ApiClient struct {
	BaseUrl  string
	Token    string
	Username string
	Password string
}

func PrepareUrl(url string) string {
	return strings.Trim(url, "/")
}

func (apiClient *ApiClient) updateCsrfToken() {
	crumbIssuerUrl := apiClient.BaseUrl + "/crumbIssuer/api/xml"

	// Parse the crumb issuer URL directly; calling Parse on a nil *url.URL receiver would panic.
	csrfTokenUrl, err := url.Parse(crumbIssuerUrl)
	if err != nil {
		panic(fmt.Sprintf("Url `%v` parsing error ", crumbIssuerUrl))
	}

	var reqParams = url.Values{}
	reqParams.Add("xpath", `concat(//crumbRequestField,":",//crumb)`)
	csrfTokenUrl.RawQuery = reqParams.Encode()

	reqClient := &http.Client{}
	req, err := http.NewRequest("GET", csrfTokenUrl.String(), nil)
	if err != nil {
		panic("Get Request generation failed")
	}
	req.SetBasicAuth(apiClient.Username, apiClient.Password)

	resp, err := reqClient.Do(req)
defer resp.Body.Close() fmt.Println(resp.Status) csrfToken, err := ioutil.ReadAll(resp.Body) if err != nil { panic("Jenkins CSRF Response reading failed") } apiClient.Token = string(csrfToken) } func (apiClient *ApiClient) prepareAPIHeader(req *http.Request) { apiClient.updateCsrfToken() csrfTokenArr := strings.Split(apiClient.Token, ":") if len(csrfTokenArr) == 2 { req.Header.Set(csrfTokenArr[0], csrfTokenArr[1]) } else { fmt.Println("Error: CSRF Token update failed") } req.SetBasicAuth(apiClient.Username, apiClient.Password) } func (apiClient *ApiClient) ApiGetQuery(reqUrl *url.URL) (*http.Response, error) { reqClient := &http.Client{} req, err := http.NewRequest("GET", reqUrl.String(), nil) if err != nil { fmt.Println("Error: ApiQuery request generation failed") } apiClient.prepareAPIHeader(req) return reqClient.Do(req) } func (apiClient *ApiClient) ApiPostQuery(reqUrl *url.URL, reqData []byte) (*http.Response, error) { fmt.Println(">> -- " + reqUrl.String()) reqClient := &http.Client{} req, err := http.NewRequest("POST", reqUrl.String(), bytes.NewBuffer(reqData)) if err != nil { fmt.Println("Error: ApiQuery request generation failed") } req.Header.Set("Content-Type", "text/xml") apiClient.prepareAPIHeader(req) return reqClient.Do(req) }
if err != nil { panic("Jenkins CSRF Token api request failed") }
sync.go
// Copyright © 2017 NAME HERE <EMAIL ADDRESS> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "context" "os" "os/signal" "syscall" "github.com/apex/log" "github.com/apex/log/handlers/json" "github.com/apex/log/handlers/multi" "github.com/apex/log/handlers/text" "github.com/marpio/mirror/crypto" "github.com/marpio/mirror/metadata" "github.com/marpio/mirror/metadata/repo" "github.com/marpio/mirror/storage" "github.com/marpio/mirror/storage/remotebackend" "github.com/marpio/mirror/syncronizer" "github.com/spf13/afero" "github.com/spf13/cobra" ) var syncCmd = &cobra.Command{ Use: "sync", Short: "Sync local directory with a remote.", Long: "", Run: func(cmd *cobra.Command, args []string) { runSync(args[0]) }, } func getenv(n string) string {
func runSync(dir string) { logFile, err := os.Create("log.json") if err != nil { log.Fatal("error creating log file") } defer logFile.Close() log.SetHandler(multi.New( text.New(os.Stderr), json.New(logFile), )) ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() logctx := log.WithFields(log.Fields{ "cmd": "mirror-cli", "syncing_dir": dir, }) encryptionKey := getenv("ENCR_KEY") b2id := getenv("B2_ACCOUNT_ID") b2key := getenv("B2_ACCOUNT_KEY") bucketName := getenv("B2_BUCKET_NAME") rsBackend := remotebackend.NewB2(ctx, b2id, b2key, bucketName) rs := storage.NewRemote(rsBackend, crypto.NewService(encryptionKey)) dbPath := getenv("REPO") repo, err := repo.NewHashmap(ctx, rs, dbPath) if err != nil { log.Fatalf("error creating metadata repository: %v", err) } sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT) defer close(sigs) go func() { for { select { case <-sigs: logctx.Warn("SIGINT - saving and terminating...") repo.Persist(ctx) cancel() return } } }() appFs := afero.NewOsFs() localFilesRepo := storage.NewLocal(appFs, crypto.GenerateSha256) syncronizer := syncronizer.New(rs, repo, localFilesRepo, metadata.NewExtractor(localFilesRepo)) syncronizer.Execute(ctx, logctx, dir) logctx.Info("done syncing.") }
v := os.Getenv(n) if v == "" { panic("could not find env var " + n) } return v }
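The interesting wiring in runSync is the SIGINT handler that persists the repository and cancels the shared context so the synchronizer can stop at a safe point. A minimal, self-contained sketch of that pattern (names are illustrative, not taken from the mirror codebase):

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT)

	go func() {
		<-sigs
		fmt.Println("SIGINT - saving state and terminating...")
		// e.g. repo.Persist(ctx) in the real code
		cancel()
	}()

	// Stand-in for syncronizer.Execute(ctx, ...): do work until cancelled.
	for {
		select {
		case <-ctx.Done():
			fmt.Println("done.")
			return
		case <-time.After(500 * time.Millisecond):
			fmt.Println("syncing...")
		}
	}
}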
expand.rs
use super::RoundKeys; use crate::ni::arch::*; use core::{mem, ptr}; macro_rules! expand_round { ($t1:expr, $t3:expr, $round:expr) => {{ let mut t1 = $t1; let mut t2; let mut t3 = $t3; let mut t4; t2 = _mm_aeskeygenassist_si128(t3, $round); t2 = _mm_shuffle_epi32(t2, 0x55); t4 = _mm_slli_si128(t1, 0x4); t1 = _mm_xor_si128(t1, t4); t4 = _mm_slli_si128(t4, 0x4); t1 = _mm_xor_si128(t1, t4); t4 = _mm_slli_si128(t4, 0x4); t1 = _mm_xor_si128(t1, t4); t1 = _mm_xor_si128(t1, t2); t2 = _mm_shuffle_epi32(t1, 0xff); t4 = _mm_slli_si128(t3, 0x4); t3 = _mm_xor_si128(t3, t4); t3 = _mm_xor_si128(t3, t2); (t1, t3) }}; } macro_rules! shuffle { ($a:expr, $b:expr, $imm:expr) => { mem::transmute::<_, __m128i>(_mm_shuffle_pd(mem::transmute($a), mem::transmute($b), $imm)) }; } #[inline(always)] pub(super) fn expand(key: &[u8; 24]) -> (RoundKeys, RoundKeys)
{ unsafe { let mut enc_keys: RoundKeys = mem::zeroed(); let mut dec_keys: RoundKeys = mem::zeroed(); macro_rules! store { ($i:expr, $k:expr) => { enc_keys[$i] = $k; dec_keys[$i] = _mm_aesimc_si128($k); }; } // we are being extra pedantic here to remove out-of-bound access. // this should be optimized out into movups, movsd sequence // note that unaligned load MUST be used here, even though we read // from the array (compiler missoptimizes aligned load) let (k0, k1l) = { let mut t = [0u8; 32]; ptr::write(t.as_mut_ptr() as *mut [u8; 24], *key); // Safety: `loadu` supports unaligned loads #[allow(clippy::cast_ptr_alignment)] ( _mm_loadu_si128(t.as_ptr() as *const __m128i), _mm_loadu_si128(t.as_ptr().offset(16) as *const __m128i), ) }; enc_keys[0] = k0; dec_keys[0] = k0; let (k1_2, k2r) = expand_round!(k0, k1l, 0x01); let k1 = shuffle!(k1l, k1_2, 0); let k2 = shuffle!(k1_2, k2r, 1); store!(1, k1); store!(2, k2); let (k3, k4l) = expand_round!(k1_2, k2r, 0x02); store!(3, k3); let (k4_5, k5r) = expand_round!(k3, k4l, 0x04); let k4 = shuffle!(k4l, k4_5, 0); let k5 = shuffle!(k4_5, k5r, 1); store!(4, k4); store!(5, k5); let (k6, k7l) = expand_round!(k4_5, k5r, 0x08); store!(6, k6); let (k7_8, k8r) = expand_round!(k6, k7l, 0x10); let k7 = shuffle!(k7l, k7_8, 0); let k8 = shuffle!(k7_8, k8r, 1); store!(7, k7); store!(8, k8); let (k9, k10l) = expand_round!(k7_8, k8r, 0x20); store!(9, k9); let (k10_11, k11r) = expand_round!(k9, k10l, 0x40); let k10 = shuffle!(k10l, k10_11, 0); let k11 = shuffle!(k10_11, k11r, 1); store!(10, k10); store!(11, k11); let (k12, _) = expand_round!(k10_11, k11r, 0x80); enc_keys[12] = k12; dec_keys[12] = k12; (enc_keys, dec_keys) } }
broker.go
/* Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package mtbroker import ( "context" "errors" "fmt" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" duckv1alpha1 "knative.dev/eventing/pkg/apis/duck/v1alpha1" "knative.dev/eventing/pkg/apis/eventing" "knative.dev/eventing/pkg/apis/eventing/v1alpha1" messagingv1beta1 "knative.dev/eventing/pkg/apis/messaging/v1beta1" clientset "knative.dev/eventing/pkg/client/clientset/versioned" brokerreconciler "knative.dev/eventing/pkg/client/injection/reconciler/eventing/v1alpha1/broker" eventinglisters "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1" messaginglisters "knative.dev/eventing/pkg/client/listers/messaging/v1alpha1" "knative.dev/eventing/pkg/duck" "knative.dev/eventing/pkg/reconciler/mtbroker/resources" "knative.dev/eventing/pkg/reconciler/names" "knative.dev/pkg/apis" duckapis "knative.dev/pkg/apis/duck" "knative.dev/pkg/logging" pkgreconciler "knative.dev/pkg/reconciler" "knative.dev/pkg/resolver" "knative.dev/pkg/system" ) const ( // Name of the corev1.Events emitted from the Broker reconciliation process. brokerReconcileError = "BrokerReconcileError" brokerReconciled = "BrokerReconciled" BrokerFilterName = "broker-filter" BrokerIngressName = "broker-ingress" ) type Reconciler struct { eventingClientSet clientset.Interface dynamicClientSet dynamic.Interface kubeClientSet kubernetes.Interface // listers index properties about resources brokerLister eventinglisters.BrokerLister endpointsLister corev1listers.EndpointsLister subscriptionLister messaginglisters.SubscriptionLister triggerLister eventinglisters.TriggerLister channelableTracker duck.ListableTracker // Dynamic tracker to track KResources. In particular, it tracks the dependency between Triggers and Sources. kresourceTracker duck.ListableTracker // Dynamic tracker to track AddressableTypes. In particular, it tracks Trigger subscribers. addressableTracker duck.ListableTracker uriResolver *resolver.URIResolver // If specified, only reconcile brokers with these labels brokerClass string recorder record.EventRecorder } // Check that our Reconciler implements Interface var _ brokerreconciler.Interface = (*Reconciler)(nil) var _ brokerreconciler.Finalizer = (*Reconciler)(nil) var brokerGVK = v1alpha1.SchemeGroupVersion.WithKind("Broker") // ReconcilerArgs are the arguments needed to create a broker.Reconciler. 
type ReconcilerArgs struct { IngressImage string IngressServiceAccountName string FilterImage string FilterServiceAccountName string } func newReconciledNormal(namespace, name string) pkgreconciler.Event { return pkgreconciler.NewEvent(corev1.EventTypeNormal, brokerReconciled, "Broker reconciled: \"%s/%s\"", namespace, name) } func (r *Reconciler) ReconcileKind(ctx context.Context, b *v1alpha1.Broker) pkgreconciler.Event { err := r.reconcileKind(ctx, b) if err != nil { logging.FromContext(ctx).Errorw("Problem reconciling broker", zap.Error(err)) } if b.Status.IsReady() { // So, at this point the Broker is ready and everything should be solid // for the triggers to act upon, so reconcile them. te := r.reconcileTriggers(ctx, b) if te != nil { logging.FromContext(ctx).Errorw("Problem reconciling triggers", zap.Error(te)) return fmt.Errorf("failed to reconcile triggers: %v", te) } } else { // Broker is not ready, but propagate it's status to my triggers. if te := r.propagateBrokerStatusToTriggers(ctx, b.Namespace, b.Name, &b.Status); te != nil { return fmt.Errorf("Trigger reconcile failed: %v", te) } } return err } func (r *Reconciler) reconcileKind(ctx context.Context, b *v1alpha1.Broker) pkgreconciler.Event { logging.FromContext(ctx).Debugw("Reconciling", zap.Any("Broker", b)) b.Status.InitializeConditions() b.Status.ObservedGeneration = b.Generation // 1. Trigger Channel is created for all events. Triggers will Subscribe to this Channel. // 2. Check that Filter / Ingress deployment (shared within cluster are there) chanMan, err := r.getChannelTemplate(ctx, b) if err != nil { b.Status.MarkTriggerChannelFailed("ChannelTemplateFailed", "Error on setting up the ChannelTemplate: %s", err) return err } logging.FromContext(ctx).Infow("Reconciling the trigger channel") c, err := resources.NewChannel("trigger", b, &chanMan.template, TriggerChannelLabels(b.Name)) if err != nil { logging.FromContext(ctx).Errorw(fmt.Sprintf("Failed to create Trigger Channel object: %s/%s", chanMan.ref.Namespace, chanMan.ref.Name), zap.Error(err)) return err } triggerChan, err := r.reconcileChannel(ctx, chanMan.inf, chanMan.ref, c, b) if err != nil { logging.FromContext(ctx).Errorw("Problem reconciling the trigger channel", zap.Error(err)) b.Status.MarkTriggerChannelFailed("ChannelFailure", "%v", err) return fmt.Errorf("Failed to reconcile trigger channel: %v", err) } if triggerChan.Status.Address == nil { logging.FromContext(ctx).Debugw("Trigger Channel does not have an address", zap.Any("triggerChan", triggerChan)) b.Status.MarkTriggerChannelFailed("NoAddress", "Channel does not have an address.") // Ok to return nil for error here, once channel address becomes available, this will get requeued. return nil } if url := triggerChan.Status.Address.GetURL(); url.Host == "" { // We check the trigger Channel's address here because it is needed to create the Ingress Deployment. logging.FromContext(ctx).Debugw("Trigger Channel does not have an address", zap.Any("triggerChan", triggerChan)) b.Status.MarkTriggerChannelFailed("NoAddress", "Channel does not have an address.") // Ok to return nil for error here, once channel address becomes available, this will get requeued. 
return nil } b.Status.TriggerChannel = &chanMan.ref b.Status.PropagateTriggerChannelReadiness(&triggerChan.Status) filterEndpoints, err := r.endpointsLister.Endpoints(system.Namespace()).Get(BrokerFilterName) if err != nil { logging.FromContext(ctx).Errorw("Problem getting endpoints for filter", zap.String("namespace", system.Namespace()), zap.Error(err)) b.Status.MarkFilterFailed("ServiceFailure", "%v", err) return err } b.Status.PropagateFilterAvailability(filterEndpoints) ingressEndpoints, err := r.endpointsLister.Endpoints(system.Namespace()).Get(BrokerIngressName) if err != nil { logging.FromContext(ctx).Errorw("Problem getting endpoints for ingress", zap.String("namespace", system.Namespace()), zap.Error(err)) b.Status.MarkIngressFailed("ServiceFailure", "%v", err) return err } b.Status.PropagateIngressAvailability(ingressEndpoints) // Route everything to shared ingress, just tack on the namespace/name as path // so we can route there appropriately. b.Status.SetAddress(&apis.URL{ Scheme: "http", Host: names.ServiceHostName("broker-ingress", system.Namespace()), Path: fmt.Sprintf("/%s/%s", b.Namespace, b.Name), }) // So, at this point the Broker is ready and everything should be solid // for the triggers to act upon. return nil } type channelTemplate struct { ref corev1.ObjectReference inf dynamic.ResourceInterface template messagingv1beta1.ChannelTemplateSpec } func (r *Reconciler) getChannelTemplate(ctx context.Context, b *v1alpha1.Broker) (*channelTemplate, error) { triggerChannelName := resources.BrokerChannelName(b.Name, "trigger") ref := corev1.ObjectReference{ Name: triggerChannelName, Namespace: b.Namespace, } var template *messagingv1beta1.ChannelTemplateSpec if b.Spec.Config != nil { if b.Spec.Config.Kind == "ConfigMap" && b.Spec.Config.APIVersion == "v1" { if b.Spec.Config.Namespace == "" || b.Spec.Config.Name == "" { logging.FromContext(ctx).Errorw("Broker.Spec.Config name and namespace are required", zap.String("namespace", b.Namespace), zap.String("name", b.Name)) return nil, errors.New("Broker.Spec.Config name and namespace are required") } cm, err := r.kubeClientSet.CoreV1().ConfigMaps(b.Spec.Config.Namespace).Get(b.Spec.Config.Name, metav1.GetOptions{}) if err != nil { return nil, err } // TODO: there are better ways to do this... if config, err := NewConfigFromConfigMapFunc(ctx)(cm); err != nil { return nil, err } else if config != nil { template = &config.DefaultChannelTemplate } logging.FromContext(ctx).Info("Using channel template = ", template) } else { return nil, errors.New("Broker.Spec.Config configuration not supported, only [kind: ConfigMap, apiVersion: v1]") } } else if b.Spec.ChannelTemplate != nil { template = b.Spec.ChannelTemplate } else { logging.FromContext(ctx).Errorw("Broker.Spec.ChannelTemplate is nil", zap.String("namespace", b.Namespace), zap.String("name", b.Name)) return nil, errors.New("Broker.Spec.ChannelTemplate is nil") } if template == nil { return nil, errors.New("failed to find channelTemplate") } ref.APIVersion = template.APIVersion ref.Kind = template.Kind gvr, _ := meta.UnsafeGuessKindToResource(template.GetObjectKind().GroupVersionKind()) inf := r.dynamicClientSet.Resource(gvr).Namespace(b.Namespace) if inf == nil { return nil, fmt.Errorf("unable to create dynamic client for: %+v", template) } track := r.channelableTracker.TrackInNamespace(b) // Start tracking the trigger channel. 
if err := track(ref); err != nil { return nil, fmt.Errorf("unable to track changes to the trigger Channel: %v", err) } return &channelTemplate{ ref: ref, inf: inf, template: *template, }, nil } func (r *Reconciler) FinalizeKind(ctx context.Context, b *v1alpha1.Broker) pkgreconciler.Event { if err := r.propagateBrokerStatusToTriggers(ctx, b.Namespace, b.Name, nil); err != nil { return fmt.Errorf("Trigger reconcile failed: %v", err) } return newReconciledNormal(b.Namespace, b.Name) } // reconcileChannel reconciles Broker's 'b' underlying channel. func (r *Reconciler) reconcileChannel(ctx context.Context, channelResourceInterface dynamic.ResourceInterface, channelObjRef corev1.ObjectReference, newChannel *unstructured.Unstructured, b *v1alpha1.Broker) (*duckv1alpha1.Channelable, error) { lister, err := r.channelableTracker.ListerFor(channelObjRef) if err != nil { logging.FromContext(ctx).Errorw(fmt.Sprintf("Error getting lister for Channel: %s/%s", channelObjRef.Namespace, channelObjRef.Name), zap.Error(err)) return nil, err } c, err := lister.ByNamespace(channelObjRef.Namespace).Get(channelObjRef.Name) // If the resource doesn't exist, we'll create it if err != nil { if apierrs.IsNotFound(err) { logging.FromContext(ctx).Info(fmt.Sprintf("Creating Channel Object: %+v", newChannel)) created, err := channelResourceInterface.Create(newChannel, metav1.CreateOptions{}) if err != nil { logging.FromContext(ctx).Errorw(fmt.Sprintf("Failed to create Channel: %s/%s", channelObjRef.Namespace, channelObjRef.Name), zap.Error(err)) return nil, err } logging.FromContext(ctx).Info(fmt.Sprintf("Created Channel: %s/%s", channelObjRef.Namespace, channelObjRef.Name), zap.Any("NewChannel", newChannel)) channelable := &duckv1alpha1.Channelable{} err = duckapis.FromUnstructured(created, channelable) if err != nil { logging.FromContext(ctx).Errorw(fmt.Sprintf("Failed to convert to Channelable Object: %s/%s", channelObjRef.Namespace, channelObjRef.Name), zap.Any("createdChannel", created), zap.Error(err)) return nil, err } return channelable, nil } logging.FromContext(ctx).Errorw(fmt.Sprintf("Failed to get Channel: %s/%s", channelObjRef.Namespace, channelObjRef.Name), zap.Error(err)) return nil, err } logging.FromContext(ctx).Debugw(fmt.Sprintf("Found Channel: %s/%s", channelObjRef.Namespace, channelObjRef.Name)) channelable, ok := c.(*duckv1alpha1.Channelable) if !ok { logging.FromContext(ctx).Errorw(fmt.Sprintf("Failed to convert to Channelable Object: %s/%s", channelObjRef.Namespace, channelObjRef.Name), zap.Error(err)) return nil, err } return channelable, nil } // TriggerChannelLabels are all the labels placed on the Trigger Channel for the given brokerName. This // should only be used by Broker and Trigger code. func TriggerChannelLabels(brokerName string) map[string]string
// reconcileTriggers reconciles the Triggers that are pointed to this broker func (r *Reconciler) reconcileTriggers(ctx context.Context, b *v1alpha1.Broker) error { // TODO: Figure out the labels stuff... If webhook does it, we can filter like this... // Find all the Triggers that have been labeled as belonging to me /* triggers, err := r.triggerLister.Triggers(b.Namespace).List(labels.SelectorFromSet(brokerLabels(b.brokerClass))) */ triggers, err := r.triggerLister.Triggers(b.Namespace).List(labels.Everything()) if err != nil { return err } for _, t := range triggers { if t.Spec.Broker == b.Name { trigger := t.DeepCopy() tErr := r.reconcileTrigger(ctx, b, trigger) if tErr != nil { logging.FromContext(ctx).Errorw("Reconciling trigger failed:", zap.String("name", t.Name), zap.Error(err)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconcile failed: %v", tErr) } else { r.recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled") } trigger.Status.ObservedGeneration = t.Generation if _, updateStatusErr := r.updateTriggerStatus(ctx, trigger); updateStatusErr != nil { logging.FromContext(ctx).Errorw("Failed to update Trigger status", zap.Error(updateStatusErr)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", updateStatusErr) } } } return nil } /* TODO: Enable once we start filtering by classes of brokers func brokerLabels(name string) map[string]string { return map[string]string{ brokerAnnotationKey: name, } } */ func (r *Reconciler) propagateBrokerStatusToTriggers(ctx context.Context, namespace, name string, bs *v1alpha1.BrokerStatus) error { triggers, err := r.triggerLister.Triggers(namespace).List(labels.Everything()) if err != nil { return err } for _, t := range triggers { if t.Spec.Broker == name { // Don't modify informers copy trigger := t.DeepCopy() trigger.Status.InitializeConditions() if bs == nil { trigger.Status.MarkBrokerFailed("BrokerDoesNotExist", "Broker %q does not exist", name) } else { trigger.Status.PropagateBrokerStatus(bs) } if _, updateStatusErr := r.updateTriggerStatus(ctx, trigger); updateStatusErr != nil { logging.FromContext(ctx).Errorw("Failed to update Trigger status", zap.Error(updateStatusErr)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", updateStatusErr) return updateStatusErr } } } return nil }
{ return map[string]string{ eventing.BrokerLabelKey: brokerName, "eventing.knative.dev/brokerEverything": "true", } }
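TriggerChannelLabels returns plain label data, so here is a small illustration of how such a map can drive a list selector, in the spirit of the commented-out brokerLabels filtering in reconcileTriggers above. This is a hypothetical, standalone sketch; the first map key stands in for eventing.BrokerLabelKey, whose literal value is an assumption.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

// triggerChannelLabels mirrors TriggerChannelLabels above; the first key stands in
// for eventing.BrokerLabelKey (its literal value is an assumption here).
func triggerChannelLabels(brokerName string) map[string]string {
	return map[string]string{
		"eventing.knative.dev/broker":           brokerName,
		"eventing.knative.dev/brokerEverything": "true",
	}
}

func main() {
	sel := labels.SelectorFromSet(labels.Set(triggerChannelLabels("default")))
	// The selector could then be handed to a lister, e.g. lister.List(sel).
	fmt.Println(sel.String())
}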
ShaderSource.js
define([ '../Core/defaultValue', '../Core/defined', '../Core/DeveloperError', '../Renderer/modernizeShader', '../Shaders/Builtin/CzmBuiltins', './AutomaticUniforms' ], function( defaultValue, defined, DeveloperError, modernizeShader, CzmBuiltins, AutomaticUniforms) { 'use strict'; function removeComments(source) { // remove inline comments source = source.replace(/\/\/.*/g, ''); // remove multiline comment block return source.replace(/\/\*\*[\s\S]*?\*\//gm, function(match) { // preserve the number of lines in the comment block so the line numbers will be correct when debugging shaders var numberOfLines = match.match(/\n/gm).length; var replacement = ''; for (var lineNumber = 0; lineNumber < numberOfLines; ++lineNumber) { replacement += '\n'; } return replacement; }); } function getDependencyNode(name, glslSource, nodes) { var dependencyNode; // check if already loaded for (var i = 0; i < nodes.length; ++i) { if (nodes[i].name === name) { dependencyNode = nodes[i]; } } if (!defined(dependencyNode)) { // strip doc comments so we don't accidentally try to determine a dependency for something found // in a comment glslSource = removeComments(glslSource); // create new node dependencyNode = { name : name, glslSource : glslSource, dependsOn : [], requiredBy : [], evaluated : false }; nodes.push(dependencyNode); } return dependencyNode; } function generateDependencies(currentNode, dependencyNodes) { if (currentNode.evaluated) { return; } currentNode.evaluated = true; // identify all dependencies that are referenced from this glsl source code var czmMatches = currentNode.glslSource.match(/\bczm_[a-zA-Z0-9_]*/g); if (defined(czmMatches) && czmMatches !== null) { // remove duplicates czmMatches = czmMatches.filter(function(elem, pos) { return czmMatches.indexOf(elem) === pos; }); czmMatches.forEach(function(element) { if (element !== currentNode.name && ShaderSource._czmBuiltinsAndUniforms.hasOwnProperty(element)) { var referencedNode = getDependencyNode(element, ShaderSource._czmBuiltinsAndUniforms[element], dependencyNodes); currentNode.dependsOn.push(referencedNode); referencedNode.requiredBy.push(currentNode); // recursive call to find any dependencies of the new node generateDependencies(referencedNode, dependencyNodes); } }); } } function sortDependencies(dependencyNodes) { var nodesWithoutIncomingEdges = []; var allNodes = []; while (dependencyNodes.length > 0) { var node = dependencyNodes.pop(); allNodes.push(node); if (node.requiredBy.length === 0) { nodesWithoutIncomingEdges.push(node); } } while (nodesWithoutIncomingEdges.length > 0) { var currentNode = nodesWithoutIncomingEdges.shift(); dependencyNodes.push(currentNode); for (var i = 0; i < currentNode.dependsOn.length; ++i) { // remove the edge from the graph var referencedNode = currentNode.dependsOn[i]; var index = referencedNode.requiredBy.indexOf(currentNode); referencedNode.requiredBy.splice(index, 1); // if referenced node has no more incoming edges, add to list if (referencedNode.requiredBy.length === 0) { nodesWithoutIncomingEdges.push(referencedNode); } } } // if there are any nodes left with incoming edges, then there was a circular dependency somewhere in the graph var badNodes = []; for (var j = 0; j < allNodes.length; ++j) { if (allNodes[j].requiredBy.length !== 0) { badNodes.push(allNodes[j]); } } //>>includeStart('debug', pragmas.debug); if (badNodes.length !== 0) { var message = 'A circular dependency was found in the following built-in functions/structs/constants: \n'; for (var k = 0; k < badNodes.length; ++k) { message 
= message + badNodes[k].name + '\n'; } throw new DeveloperError(message); } //>>includeEnd('debug'); } function getBuiltinsAndAutomaticUniforms(shaderSource) { // generate a dependency graph for builtin functions var dependencyNodes = []; var root = getDependencyNode('main', shaderSource, dependencyNodes); generateDependencies(root, dependencyNodes); sortDependencies(dependencyNodes); // Concatenate the source code for the function dependencies. // Iterate in reverse so that dependent items are declared before they are used. var builtinsSource = ''; for (var i = dependencyNodes.length - 1; i >= 0; --i) { builtinsSource = builtinsSource + dependencyNodes[i].glslSource + '\n'; } return builtinsSource.replace(root.glslSource, ''); } function combineShader(shaderSource, isFragmentShader, context) { var i; var length; // Combine shader sources, generally for pseudo-polymorphism, e.g., czm_getMaterial. var combinedSources = ''; var sources = shaderSource.sources; if (defined(sources)) { for (i = 0, length = sources.length; i < length; ++i) { // #line needs to be on its own line. combinedSources += '\n#line 0\n' + sources[i]; } } combinedSources = removeComments(combinedSources); // Extract existing shader version from sources var version; combinedSources = combinedSources.replace(/#version\s+(.*?)\n/gm, function(match, group1) { //>>includeStart('debug', pragmas.debug); if (defined(version) && version !== group1) { throw new DeveloperError('inconsistent versions found: ' + version + ' and ' + group1); } //>>includeEnd('debug'); // Extract #version to put at the top version = group1; // Replace original #version directive with a new line so the line numbers // are not off by one. There can be only one #version directive // and it must appear at the top of the source, only preceded by // whitespace and comments. return '\n'; }); // Extract shader extensions from sources var extensions = []; combinedSources = combinedSources.replace(/#extension.*\n/gm, function(match) { // Extract extension to put at the top extensions.push(match); // Replace original #extension directive with a new line so the line numbers // are not off by one. return '\n'; }); // Remove precision qualifier combinedSources = combinedSources.replace(/precision\s(lowp|mediump|highp)\s(float|int);/, ''); // Replace main() for picked if desired. 
var pickColorQualifier = shaderSource.pickColorQualifier; if (defined(pickColorQualifier)) { combinedSources = ShaderSource.createPickFragmentShaderSource(combinedSources, pickColorQualifier); } // combine into single string var result = ''; // #version must be first // defaults to #version 100 if not specified if (defined(version)) { result = '#version ' + version + '\n'; } var extensionsLength = extensions.length; for (i = 0; i < extensionsLength; i++) { result += extensions[i]; } if (isFragmentShader) { result += '\ #ifdef GL_FRAGMENT_PRECISION_HIGH\n\ precision highp float;\n\ #else\n\ precision mediump float;\n\ #endif\n\n'; } // Prepend #defines for uber-shaders var defines = shaderSource.defines; if (defined(defines)) { for (i = 0, length = defines.length; i < length; ++i) { var define = defines[i]; if (define.length !== 0) { result += '#define ' + define + '\n'; } } } // GLSLModernizer inserts its own layout qualifiers // at this position in the source if (context.webgl2) { result += '#define OUTPUT_DECLARATION\n\n'; } // append built-ins if (shaderSource.includeBuiltIns) { result += getBuiltinsAndAutomaticUniforms(combinedSources); } // reset line number result += '\n#line 0\n'; // append actual source result += combinedSources; // modernize the source if (context.webgl2) { result = modernizeShader(result, isFragmentShader, true); } return result; } /** * An object containing various inputs that will be combined to form a final GLSL shader string. * * @param {Object} [options] Object with the following properties: * @param {String[]} [options.sources] An array of strings to combine containing GLSL code for the shader. * @param {String[]} [options.defines] An array of strings containing GLSL identifiers to <code>#define</code>. * @param {String} [options.pickColorQualifier] The GLSL qualifier, <code>uniform</code> or <code>varying</code>, for the input <code>czm_pickColor</code>. When defined, a pick fragment shader is generated. * @param {Boolean} [options.includeBuiltIns=true] If true, referenced built-in functions will be included with the combined shader. Set to false if this shader will become a source in another shader, to avoid duplicating functions. * * @exception {DeveloperError} options.pickColorQualifier must be 'uniform' or 'varying'. * * @example * // 1. Prepend #defines to a shader * var source = new Cesium.ShaderSource({ * defines : ['WHITE'], * sources : ['void main() { \n#ifdef WHITE\n gl_FragColor = vec4(1.0); \n#else\n gl_FragColor = vec4(0.0); \n#endif\n }'] * }); * * // 2. Modify a fragment shader for picking * var source = new Cesium.ShaderSource({ * sources : ['void main() { gl_FragColor = vec4(1.0); }'], * pickColorQualifier : 'uniform' * }); * * @private */ function ShaderSource(options) { options = defaultValue(options, defaultValue.EMPTY_OBJECT); var pickColorQualifier = options.pickColorQualifier; //>>includeStart('debug', pragmas.debug); if (defined(pickColorQualifier) && pickColorQualifier !== 'uniform' && pickColorQualifier !== 'varying') { throw new DeveloperError('options.pickColorQualifier must be \'uniform\' or \'varying\'.'); } //>>includeEnd('debug'); this.defines = defined(options.defines) ? options.defines.slice(0) : []; this.sources = defined(options.sources) ? 
options.sources.slice(0) : []; this.pickColorQualifier = pickColorQualifier; this.includeBuiltIns = defaultValue(options.includeBuiltIns, true); } ShaderSource.prototype.clone = function() { return new ShaderSource({ sources : this.sources, defines : this.defines, pickColorQualifier : this.pickColorQualifier, includeBuiltIns : this.includeBuiltIns }); }; ShaderSource.replaceMain = function(source, renamedMain) { renamedMain = 'void ' + renamedMain + '()'; return source.replace(/void\s+main\s*\(\s*(?:void)?\s*\)/g, renamedMain); }; /** * Create a single string containing the full, combined vertex shader with all dependencies and defines. * * @param {Context} context The current rendering context * * @returns {String} The combined shader string. */ ShaderSource.prototype.createCombinedVertexShader = function(context) { return combineShader(this, false, context); }; /** * Create a single string containing the full, combined fragment shader with all dependencies and defines. * * @param {Context} context The current rendering context * * @returns {String} The combined shader string. */ ShaderSource.prototype.createCombinedFragmentShader = function(context) { return combineShader(this, true, context); }; /** * For ShaderProgram testing * @private */ ShaderSource._czmBuiltinsAndUniforms = {}; // combine automatic uniforms and Cesium built-ins for ( var builtinName in CzmBuiltins) { if (CzmBuiltins.hasOwnProperty(builtinName)) { ShaderSource._czmBuiltinsAndUniforms[builtinName] = CzmBuiltins[builtinName]; } } for ( var uniformName in AutomaticUniforms) { if (AutomaticUniforms.hasOwnProperty(uniformName)) { var uniform = AutomaticUniforms[uniformName]; if (typeof uniform.getDeclaration === 'function') { ShaderSource._czmBuiltinsAndUniforms[uniformName] = uniform.getDeclaration(uniformName); } } } ShaderSource.createPickVertexShaderSource = function(vertexShaderSource) { var renamedVS = ShaderSource.replaceMain(vertexShaderSource, 'czm_old_main'); var pickMain = 'attribute vec4 pickColor; \n' + 'varying vec4 czm_pickColor; \n' + 'void main() \n' + '{ \n' + ' czm_old_main(); \n' + ' czm_pickColor = pickColor; \n' + '}'; return renamedVS + '\n' + pickMain; }; ShaderSource.createPickFragmentShaderSource = function(fragmentShaderSource, pickColorQualifier) { var renamedFS = ShaderSource.replaceMain(fragmentShaderSource, 'czm_old_main');
'{ \n' + ' czm_old_main(); \n' + ' if (gl_FragColor.a == 0.0) { \n' + ' discard; \n' + ' } \n' + ' gl_FragColor = czm_pickColor; \n' + '}'; return renamedFS + '\n' + pickMain; }; ShaderSource.findVarying = function(shaderSource, names) { var sources = shaderSource.sources; var namesLength = names.length; for (var i = 0; i < namesLength; ++i) { var name = names[i]; var sourcesLength = sources.length; for (var j = 0; j < sourcesLength; ++j) { if (sources[j].indexOf(name) !== -1) { return name; } } } return undefined; }; var normalVaryingNames = ['v_normalEC', 'v_normal']; ShaderSource.findNormalVarying = function(shaderSource) { return ShaderSource.findVarying(shaderSource, normalVaryingNames); }; var positionVaryingNames = ['v_positionEC']; ShaderSource.findPositionVarying = function(shaderSource) { return ShaderSource.findVarying(shaderSource, positionVaryingNames); }; return ShaderSource; });
var pickMain = pickColorQualifier + ' vec4 czm_pickColor; \n' + 'void main() \n' +
695. Max Area of Island_test.go
package leetcode import ( "fmt" "testing" ) type question695 struct { para695 ans695 } // para 是参数 // one 代表第一个参数 type para695 struct { one [][]int } // ans 是答案 // one 代表第一个答案 type ans695 struct { one int } func Test_Problem695(t *testing.T) { qs :=
question695{ para695{[][]int{ []int{1, 1, 1, 1, 0}, []int{1, 1, 0, 1, 0}, []int{1, 1, 0, 0, 0}, []int{0, 0, 0, 0, 0}, }}, ans695{9}, }, question695{ para695{[][]int{ []int{1, 1, 0, 0, 0}, []int{1, 1, 0, 0, 0}, []int{0, 0, 1, 0, 0}, []int{0, 0, 0, 1, 1}, }}, ans695{4}, }, question695{ para695{[][]int{ []int{1, 1, 1, 1, 1, 1, 1, 0}, []int{1, 0, 0, 0, 0, 1, 1, 0}, []int{1, 0, 1, 0, 1, 1, 1, 0}, []int{1, 0, 0, 0, 0, 1, 0, 1}, []int{1, 1, 1, 1, 1, 1, 1, 0}, }}, ans695{23}, }, question695{ para695{[][]int{ []int{0, 0, 1, 0, 0}, []int{0, 1, 0, 1, 0}, []int{0, 1, 1, 1, 0}, }}, ans695{5}, }, question695{ para695{[][]int{ []int{1, 1, 1, 1, 1, 1, 1}, []int{1, 0, 0, 0, 0, 0, 1}, []int{1, 0, 1, 1, 1, 0, 1}, []int{1, 0, 1, 0, 1, 0, 1}, []int{1, 0, 1, 1, 1, 0, 1}, []int{1, 0, 0, 0, 0, 0, 1}, []int{1, 1, 1, 1, 1, 1, 1}, }}, ans695{24}, }, } fmt.Printf("------------------------Leetcode Problem 695------------------------\n") for _, q := range qs { _, p := q.ans695, q.para695 fmt.Printf("【input】:%v 【output】:%v\n", p, maxAreaOfIsland(p.one)) } fmt.Printf("\n\n\n") }
[]question695{
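The function under test, maxAreaOfIsland, lives outside this test file and is not shown. As a rough illustration only, a standard DFS solution consistent with these cases could look like the sketch below (renamed so it does not collide with the real implementation; it marks visited cells in place).

package leetcode

// maxAreaOfIslandSketch is an illustrative DFS solution, not the original code.
func maxAreaOfIslandSketch(grid [][]int) int {
	best := 0
	var dfs func(r, c int) int
	dfs = func(r, c int) int {
		if r < 0 || r >= len(grid) || c < 0 || c >= len(grid[r]) || grid[r][c] == 0 {
			return 0
		}
		grid[r][c] = 0 // mark visited
		return 1 + dfs(r+1, c) + dfs(r-1, c) + dfs(r, c+1) + dfs(r, c-1)
	}
	for r := range grid {
		for c := range grid[r] {
			if grid[r][c] == 1 {
				if area := dfs(r, c); area > best {
					best = area
				}
			}
		}
	}
	return best
}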
styles.js
import styled from 'styled-components'; export const Container = styled.svg` animation: rotate 2s linear infinite; margin: 2.5px; width: ${props => props.size}px; height: ${props => props.size}px; & .path { stroke: ${props => props.color};
@keyframes rotate { 100% { transform: rotate(360deg); } } @keyframes dash { 0% { stroke-dasharray: 1, 150; stroke-dashoffset: 0; } 50% { stroke-dasharray: 90, 150; stroke-dashoffset: -35; } 100% { stroke-dasharray: 90, 150; stroke-dashoffset: -124; } } `;
stroke-linecap: round; animation: dash 1.5s ease-in-out infinite; }
filterbr.py
#!/usr/bin/env python3 # 2017, Georg Sauthoff <[email protected]>, GPLv3 import sys def skip_comments(lines): state = 0 for line in lines: n = len(line) l = '' p = 0 while p < n: if state == 0: a = line.find('//', p) b = line.find('/*', p) if a > -1 and (a < b or b == -1): l += line[p:a] p = n elif b > -1 and (b < a or a == -1): l += line[p:b] p = b+2 state = 1 else: l += line[p:] p = n elif state == 1: a = line.rfind('*/', p) if a == -1: p = n else: p = a + 2 state = 0 yield l def cond_lines(lines): state = 0 pcnt = 0 for nr, line in enumerate(lines, 1): if not line: continue n = len(line) p = 0 do_yield = False while p < n: if state == 0: p = line.find('if', p) if p == -1: p = n continue if (p == 0 or not line[p-1].isalpha()) \ and (p+2 == len(line) or not line[p+2].isalpha()): do_yield = True state = 1 p += 2 elif state == 1: do_yield = True p = line.find('(', p) if p == -1: p = n else:
do_yield = True for p in range(p, n): if line[p] == '(': pcnt += 1 elif line[p] == ')': pcnt -= 1 if not pcnt: state = 0 break p += 1 if do_yield: yield nr def cond_lines_from_file(filename): with open(filename) as f: yield from cond_lines(skip_comments(f)) def filter_lcov_trace(lines): nrs = set() for line in lines: if line.startswith('SF:'): nrs = set(cond_lines_from_file(line[3:-1])) elif line.startswith('BRDA:'): xs = line[5:].split(',') nr = int(xs[0]) if xs else 0 if nr not in nrs: continue yield line def filter_lcov_trace_file(s_filename, d_file): with open(s_filename) as f: for l in filter_lcov_trace(f): print(l, end='', file=d_file) if __name__ == '__main__': #for l in cond_lines_from_file(sys.argv[1]): # print(l) filter_lcov_trace_file(sys.argv[1], sys.stdout) #with open(sys.argv[1]) as f: # for l in skip_comments(f): # print(l)
p += 1 state = 2 pcnt = 1 elif state == 2:
matcher.rs
use fuzzywuzzy::fuzz; use std::cmp::Ordering; use std::time::Duration; //use serde::{Serialize, Deserialize}; use crate::tagger::helpers::Helpers; use crate::tagger::{MultipleMatchesSort, TaggerConfig, Track}; const CONFIDENCE_LEVEL: f64 = 95.0; const MATCH_ARTISTS: bool = true; pub struct Matcher {} impl Matcher { fn match_duration(local: &Track, api: &Track, config: &TaggerConfig) -> bool { if !config.match_duration { return true; } else if local.duration.unwrap() == Duration::ZERO || api.duration.unwrap() == Duration::ZERO { return true; } else { let diff = (local.duration.unwrap().as_secs() as i64 - api.duration.unwrap().as_secs() as i64) .abs() as u64; return diff <= config.max_duration_difference; } }
let artist_coinc: f64 = fuzz::ratio( &local.artist.as_ref().unwrap(), &api.artist.as_ref().unwrap(), ) .into(); let remixer_coinc: f64 = fuzz::ratio( &local.remixer.as_ref().unwrap_or(&"".to_string()), &api.remixer.as_ref().unwrap_or(&"".to_string()), ) .into(); //Artists in vectors let artists_coinc: f64 = fuzz::token_sort_ratio( &Helpers::join_artists(&local.artists.as_ref().unwrap_or(&vec!["".to_string()])), &Helpers::join_artists(&api.artists.as_ref().unwrap_or(&vec!["".to_string()])), true, true, ) .into(); let main_artists_coinc: f64 = fuzz::token_sort_ratio( &Helpers::join_artists(&local.main_artists.as_ref().unwrap_or(&vec!["".to_string()])), &Helpers::join_artists(&api.main_artists.as_ref().unwrap_or(&vec!["".to_string()])), true, true, ) .into(); let feat_artists_coinc: f64 = fuzz::token_sort_ratio( &Helpers::join_artists(&local.feat_artists.as_ref().unwrap_or(&vec!["".to_string()])), &Helpers::join_artists(&api.feat_artists.as_ref().unwrap_or(&vec!["".to_string()])), true, true, ) .into(); let remixers_coinc: f64 = fuzz::token_sort_ratio( &Helpers::join_artists(&local.remixers.as_ref().unwrap_or(&vec!["".to_string()])), &Helpers::join_artists(&api.remixers.as_ref().unwrap_or(&vec!["".to_string()])), true, true, ) .into(); if artist_coinc >= strictness || artists_coinc >= strictness || (main_artists_coinc >= strictness && (feat_artists_coinc >= strictness || remixer_coinc >= strictness || remixers_coinc >= strictness)) { return true; } return false; } fn one_artist(local: &Track, api: &Track, strictness: f64) -> bool { for artist in local.artists.as_ref().unwrap_or(&vec!["".to_string()]) { if api .artists .as_ref() .unwrap_or(&vec!["".to_string()]) .contains(&artist) { return true; } } return false; } // Match local track to API track fn compare_tracks(local: &Track, api: &Track, config: &TaggerConfig) -> Option<(f64, Track)> { if !Matcher::match_duration(local, api, &config) { return None; } // Helpers let title_coinc: f64 = fuzz::ratio(&local.title.as_ref()?, &api.title.as_ref()?).into(); let name_coinc: f64 = fuzz::ratio(&local.name.as_ref()?, &api.name.as_ref()?).into(); let mix_coinc: f64 = fuzz::ratio(&local.mix.as_ref()?, &api.mix.as_ref()?).into(); // Exact match if title_coinc >= CONFIDENCE_LEVEL || (name_coinc >= CONFIDENCE_LEVEL && mix_coinc >= CONFIDENCE_LEVEL) { if MATCH_ARTISTS && Matcher::match_artists(local, api, config.strictness) { return Some((1.0, api.to_owned())); } else { return Some((1.0, api.to_owned())); } } // Fuzzy match else if title_coinc >= config.strictness { if MATCH_ARTISTS && Matcher::match_artists(local, api, config.strictness) { return Some((title_coinc, api.to_owned())); } else { return Some((title_coinc, api.to_owned())); } } // No match else { return None; } } // Match local track to API response (set of tracks) pub fn match_track( local: &Track, tracks: &Vec<Track>, config: &TaggerConfig, ) -> Option<(f64, Track)> { let mut exact_matches: Vec<(f64, Track)> = vec![]; let mut fuzzy_matches: Vec<(f64, Track)> = vec![]; // Go through API set of tracks for track in tracks { // Try to match with local track let result: Option<(f64, Track)> = Matcher::compare_tracks(local, track, &config); if result.as_ref().unwrap().0 == 1.00 { exact_matches.push(result.unwrap()) } else if result.as_ref().unwrap().0 > config.strictness { fuzzy_matches.push(result.unwrap()) } } // Use exact matches if !exact_matches.is_empty() { Matcher::sort_tracks(&mut exact_matches, &config); return Some(exact_matches[0].to_owned()); } // Use fuzzy matches else if 
!fuzzy_matches.is_empty() { fuzzy_matches.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap()); let best_acc = fuzzy_matches[0].0; let mut fuzz: Vec<(f64, Track)> = fuzzy_matches .into_iter() .filter(|(acc, _)| *acc >= best_acc) .collect(); Matcher::sort_tracks(&mut fuzz, &config); Some(fuzz[0].to_owned()) } // No match else { return None; } } /// Sort matched tracks by release dates fn sort_tracks(tracks: &mut Vec<(f64, Track)>, config: &TaggerConfig) { match config.multiple_matches { MultipleMatchesSort::Default => {} MultipleMatchesSort::Oldest => tracks.sort_by(|a, b| { if a.1.release_date.is_none() || b.1.release_date.is_none() { Ordering::Equal } else { a.1.release_date .as_ref() .unwrap() .cmp(b.1.release_date.as_ref().unwrap()) } }), MultipleMatchesSort::Newest => tracks.sort_by(|a, b| { if a.1.release_date.is_none() || b.1.release_date.is_none() { Ordering::Equal } else { b.1.release_date .as_ref() .unwrap() .cmp(a.1.release_date.as_ref().unwrap()) } }), } } }
fn match_artists(local: &Track, api: &Track, strictness: f64) -> bool { //Artist fields
tjfocExport_test.go
package interop import ( "crypto/x509/pkix" "testing" "github.com/Hyperledger-TWGC/tjfoc-gm/sm2" "github.com/Hyperledger-TWGC/tjfoc-gm/x509" ) func
(t *testing.T) {
	sm2PrivKey, err := sm2.GenerateKey(nil)
	Fatal(err, t)
	pemBytes, err := x509.WritePrivateKeyToPem(sm2PrivKey, nil)
	Fatal(err, t)
	var pemFile = "testdata/tjfoc/priv.pem"
	WriteFile(pemBytes, pemFile, t)
	pubKey, _ := sm2PrivKey.Public().(*sm2.PublicKey)
	pemFile = "testdata/tjfoc/pub.pem"
	pemBytes, err = x509.WritePublicKeyToPem(pubKey)
	Fatal(err, t)
	WriteFile(pemBytes, pemFile, t)
	pemFile = "testdata/tjfoc/req.pem"
	templateReq := &x509.CertificateRequest{
		Subject: pkix.Name{
			CommonName:   "test.example.com",
			Organization: []string{"Test"},
		},
		SignatureAlgorithm: x509.SM2WithSM3,
	}
	pemBytes, err = x509.CreateCertificateRequestToPem(templateReq, sm2PrivKey)
	Fatal(err, t)
	WriteFile(pemBytes, pemFile, t)
}
TestTJSM2Pem
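The test relies on Fatal and WriteFile helpers defined elsewhere in the interop package and not shown in this excerpt. Inferred from the call sites only, they might look roughly like this sketch (signatures match the calls above; the bodies are assumptions):

package interop

import (
	"os"
	"testing"
)

// Fatal aborts the test if err is non-nil.
func Fatal(err error, t *testing.T) {
	t.Helper()
	if err != nil {
		t.Fatal(err)
	}
}

// WriteFile writes data to path, failing the test on error.
func WriteFile(data []byte, path string, t *testing.T) {
	t.Helper()
	if err := os.WriteFile(path, data, 0o644); err != nil {
		t.Fatal(err)
	}
}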
structs.rs
//! Wrappers of rvm::Guest and rvm::Vcpu use alloc::sync::Arc; use spin::Mutex; use rcore_memory::{memory_set::MemoryAttr, PAGE_SIZE}; use rvm::{DefaultGuestPhysMemorySet, GuestPhysAddr, HostVirtAddr, RvmResult}; use rvm::{Guest as GuestInner, Vcpu as VcpuInner}; use super::memory::RvmPageTableHandlerDelay; use crate::memory::GlobalFrameAlloc; pub(super) struct Guest { gpm: Arc<DefaultGuestPhysMemorySet>, pub(super) inner: Arc<GuestInner>, } pub(super) struct Vcpu { pub(super) inner: Mutex<VcpuInner>, } impl Guest { pub fn new() -> RvmResult<Self> { let gpm = DefaultGuestPhysMemorySet::new(); Ok(Self { inner: GuestInner::new(gpm.clone())?, gpm, }) } pub fn add_memory_region(&self, gpaddr: GuestPhysAddr, size: usize) -> RvmResult<HostVirtAddr> { self.inner.add_memory_region(gpaddr, size, None)?; let thread = crate::process::current_thread().unwrap(); let hvaddr = thread.vm.lock().find_free_area(PAGE_SIZE, size); let handler = RvmPageTableHandlerDelay::new(gpaddr, hvaddr, self.gpm.clone(), GlobalFrameAlloc); thread.vm.lock().push( hvaddr, hvaddr + size, MemoryAttr::default().user().writable(), handler, "rvm_guest_physical", ); Ok(hvaddr) } } impl Vcpu { pub fn new(entry: u64, guest: Arc<GuestInner>) -> RvmResult<Self>
}
{ Ok(Self { inner: Mutex::new(VcpuInner::new(entry, guest)?), }) }
ConnectionsRepository.ts
import { EntityRepository, Repository } from "typeorm";
import { Connection } from "../entities/Connection";

@EntityRepository(Connection)
class ConnectionsRepository extends Repository<Connection> {}
export { ConnectionsRepository };
uuid.rs
use crate::{Archive, Deserialize, Fallible, Serialize}; use uuid::Uuid; impl Archive for Uuid { type Archived = Uuid; type Resolver = (); unsafe fn resolve(&self, _: usize, _: Self::Resolver, out: *mut Self::Archived) { // Safety: Uuid is portable and has no padding out.write(*self); } } // Safety: Uuid is portable and has no padding #[cfg(feature = "copy")] unsafe impl crate::copy::ArchiveCopySafe for Uuid {} impl<S: Fallible + ?Sized> Serialize<S> for Uuid { fn serialize(&self, _: &mut S) -> Result<Self::Resolver, S::Error>
} impl<D: Fallible + ?Sized> Deserialize<Uuid, D> for Uuid { fn deserialize(&self, _: &mut D) -> Result<Uuid, D::Error> { Ok(*self) } } #[cfg(test)] mod rkyv_tests { use crate::{ archived_root, ser::{serializers::AlignedSerializer, Serializer}, util::AlignedVec, Deserialize, Infallible, }; use uuid::Uuid; #[test] fn test_serialize_deserialize() { let uuid_str = "f9168c5e-ceb2-4faa-b6bf-329bf39fa1e4"; let u = Uuid::parse_str(uuid_str).unwrap(); let mut serializer = AlignedSerializer::new(AlignedVec::new()); serializer .serialize_value(&u) .expect("failed to archive uuid"); let buf = serializer.into_inner(); let archived = unsafe { archived_root::<Uuid>(buf.as_ref()) }; assert_eq!(&u, archived); let deserialized = archived .deserialize(&mut Infallible) .expect("failed to deserialize uuid"); assert_eq!(u, deserialized); } }
{ Ok(()) }
word_break.rs
use std::collections::HashSet; fn
(s: &str, v: Vec<Option<usize>>) -> String { let mut idx = s.len(); let mut slice_vec = vec![]; while let Some(prev) = v[idx] { slice_vec.push(&s[prev..idx]); idx = prev; } slice_vec.reverse(); slice_vec.join(" ") } fn word_break(s: &str, dict: HashSet<&str>) -> Option<String> { let size = s.len() + 1; let mut possible = vec![None; size]; let check_word = |i,j| dict.get(&s[i..j]).map(|_| i); for i in 1..size { possible[i] = possible[i].or_else(|| check_word(0,i)); if possible[i].is_some() { for j in i+1..size { possible[j] = possible[j].or_else(|| check_word(i,j)); } if possible[s.len()].is_some() { return Some(create_string(s, possible)); } }; } None } fn main() { let mut set = HashSet::new(); set.insert("a"); set.insert("bc"); set.insert("abc"); set.insert("cd"); set.insert("b"); println!("{:?}", word_break("abcd", set).unwrap()); }
create_string
test_app_search.py
# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import jwt import pytest from elastic_enterprise_search import AppSearch, UnauthorizedError @pytest.fixture() def app_search(): yield AppSearch( "http://localhost:3002", http_auth="private-k3ra4bqu12vgnhe3wibdw69f" ) @pytest.mark.vcr() def test_list_engines(app_search): resp = app_search.list_engines() assert resp.status == 200 assert resp == { "meta": { "page": {"current": 1, "total_pages": 1, "total_results": 1, "size": 25} }, "results": [ {"name": "national-parks-demo", "type": "default", "language": None} ], } @pytest.mark.vcr() def test_list_documents(app_search): resp = app_search.list_documents( engine_name="national-parks-demo", page_size=2, current_page=3 ) assert resp.status == 200 assert resp == { "meta": { "page": {"current": 3, "total_pages": 30, "total_results": 59, "size": 2} }, "results": [ { "nps_link": "https://www.nps.gov/zion/index.htm", "title": "Zion", "date_established": "1919-11-19T06:00:00+00:00", "world_heritage_site": "false", "states": ["Utah"], "description": "Located at the junction of the Colorado Plateau, Great Basin, and Mojave Desert, this park contains sandstone features such as mesas, rock towers, and canyons, including the Virgin River Narrows. The various sandstone formations and the forks of the Virgin River create a wilderness divided into four ecosystems: desert, riparian, woodland, and coniferous forest.", "visitors": 4295127.0, "id": "park_zion", "location": "37.3,-113.05", "square_km": 595.8, "acres": 147237.02, }, { "nps_link": "https://www.nps.gov/yell/index.htm", "title": "Yellowstone", "date_established": "1872-03-01T06:00:00+00:00", "world_heritage_site": "true", "states": ["Wyoming", "Montana", "Idaho"], "description": "Situated on the Yellowstone Caldera, the park has an expansive network of geothermal areas including boiling mud pots, vividly colored hot springs such as Grand Prismatic Spring, and regularly erupting geysers, the best-known being Old Faithful. The yellow-hued Grand Canyon of the Yellowstone River contains several high waterfalls, while four mountain ranges traverse the park. 
More than 60 mammal species including gray wolves, grizzly bears, black bears, lynxes, bison, and elk, make this park one of the best wildlife viewing spots in the country.", "visitors": 4257177.0, "id": "park_yellowstone", "location": "44.6,-110.5", "square_km": 8983.2, "acres": 2219790.71, }, ], } @pytest.mark.vcr() def test_delete_documents(app_search): resp = app_search.delete_documents( engine_name="national-parks-demo", body=[ "park_yellowstone", "park_zion", ], ) assert resp.status == 200 assert resp == [ {"id": "park_yellowstone", "deleted": True}, {"id": "park_zion", "deleted": True}, ] @pytest.mark.vcr() def test_index_documents(app_search): resp = app_search.index_documents( engine_name="national-parks-demo", body=[ { "nps_link": "https://www.nps.gov/zion/index.htm", "title": "Zion", "date_established": "1919-11-19T06:00:00+00:00", "world_heritage_site": "false", "states": ["Utah"], "description": "Located at the junction of the Colorado Plateau, Great Basin, and Mojave Desert, this park contains sandstone features such as mesas, rock towers, and canyons, including the Virgin River Narrows. The various sandstone formations and the forks of the Virgin River create a wilderness divided into four ecosystems: desert, riparian, woodland, and coniferous forest.", "visitors": 4295127.0, "id": "park_zion", "location": "37.3,-113.05", "square_km": 595.8, "acres": 147237.02, }, { "nps_link": "https://www.nps.gov/yell/index.htm", "title": "Yellowstone", "date_established": "1872-03-01T06:00:00+00:00", "world_heritage_site": "true", "states": ["Wyoming", "Montana", "Idaho"], "description": "Situated on the Yellowstone Caldera, the park has an expansive network of geothermal areas including boiling mud pots, vividly colored hot springs such as Grand Prismatic Spring, and regularly erupting geysers, the best-known being Old Faithful. The yellow-hued Grand Canyon of the Yellowstone River contains several high waterfalls, while four mountain ranges traverse the park. More than 60 mammal species including gray wolves, grizzly bears, black bears, lynxes, bison, and elk, make this park one of the best wildlife viewing spots in the country.", "visitors": 4257177.0, "id": "park_yellowstone", "location": "44.6,-110.5", "square_km": 8983.2, "acres": 2219790.71, }, ], ) assert resp.status == 200 assert resp == [ {"id": "park_zion", "errors": []}, {"id": "park_yellowstone", "errors": []}, ] @pytest.mark.vcr() def test_search(app_search): resp = app_search.search( engine_name="national-parks-demo", body={"query": "tree", "page": {"size": 2}} ) assert resp.status == 200 assert resp == { "meta": { "alerts": [], "warnings": [], "page": {"current": 1, "total_pages": 12, "total_results": 23, "size": 2}, "engine": {"name": "national-parks-demo", "type": "default"}, "request_id": "4999a4ef-b750-4bef-aea2-87f54d9c87b3", }, "results": [ { "nps_link": {"raw": "https://www.nps.gov/grsm/index.htm"}, "title": {"raw": "Great Smoky Mountains"}, "date_established": {"raw": "1934-06-15T05:00:00+00:00"}, "world_heritage_site": {"raw": "true"}, "states": {"raw": ["Tennessee", "North Carolina"]}, "description": { "raw": "The Great Smoky Mountains, part of the Appalachian Mountains, span a wide range of elevations, making them home to over 400 vertebrate species, 100 tree species, and 5000 plant species. Hiking is the park's main attraction, with over 800 miles (1,300 km) of trails, including 70 miles (110 km) of the Appalachian Trail. 
Other activities include fishing, horseback riding, and touring nearly 80 historic structures." }, "visitors": {"raw": 11312786.0}, "_meta": { "id": "park_great-smoky-mountains", "engine": "national-parks-demo", "score": 16969184.0, }, "id": {"raw": "park_great-smoky-mountains"}, "location": {"raw": "35.68,-83.53"}, "square_km": {"raw": 2114.2}, "acres": {"raw": 522426.88}, }, { "nps_link": {"raw": "https://www.nps.gov/yose/index.htm"}, "title": {"raw": "Yosemite"}, "date_established": {"raw": "1890-10-01T05:00:00+00:00"}, "world_heritage_site": {"raw": "true"}, "states": {"raw": ["California"]}, "description": { "raw": "Yosemite features sheer granite cliffs, exceptionally tall waterfalls, and old-growth forests at a unique intersection of geology and hydrology. Half Dome and El Capitan rise from the park's centerpiece, the glacier-carved Yosemite Valley, and from its vertical walls drop Yosemite Falls, one of North America's tallest waterfalls at 2,425 feet (739 m) high. Three giant sequoia groves, along with a pristine wilderness in the heart of the Sierra Nevada, are home to a wide variety of rare plant and animal species." }, "visitors": {"raw": 5028868.0}, "_meta": { "id": "park_yosemite", "engine": "national-parks-demo", "score": 7543302.0, }, "id": {"raw": "park_yosemite"}, "location": {"raw": "37.83,-119.5"}, "square_km": {"raw": 3082.7}, "acres": {"raw": 761747.5}, }, ], } @pytest.mark.vcr() def test_not_authorized(app_search): app_search.http_auth = None with pytest.raises(UnauthorizedError) as e: app_search.list_engines() assert e.value.status == 401 assert e.value.message == {"error": "You need to sign in before continuing."}
assert e.value.errors == () resp = app_search.list_engines(ignore_status=401) assert resp.status == 401 assert resp == {"error": "You need to sign in before continuing."} def test_create_signed_search_key(): private_key = "private-" signed_key = AppSearch.create_signed_search_key( api_key=private_key, api_key_name="api-key-name", search_fields={"first_name": {}}, filters={"status": "available"}, facets=None, ) assert isinstance(signed_key, str) assert jwt.decode(signed_key, private_key, algorithms="HS256") == { "api_key_name": "api-key-name", "facets": None, "filters": {"status": "available"}, "search_fields": {"first_name": {}}, }
get_networks_network_id_dns_records_domain_parameters.go
// Code generated by go-swagger; DO NOT EDIT. package networks // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "context" "net/http" "time" "github.com/go-openapi/errors" "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" strfmt "github.com/go-openapi/strfmt" ) // NewGetNetworksNetworkIDDNSRecordsDomainParams creates a new GetNetworksNetworkIDDNSRecordsDomainParams object // with the default values initialized. func NewGetNetworksNetworkIDDNSRecordsDomainParams() *GetNetworksNetworkIDDNSRecordsDomainParams { var () return &GetNetworksNetworkIDDNSRecordsDomainParams{ timeout: cr.DefaultTimeout, } } // NewGetNetworksNetworkIDDNSRecordsDomainParamsWithTimeout creates a new GetNetworksNetworkIDDNSRecordsDomainParams object // with the default values initialized, and the ability to set a timeout on a request func NewGetNetworksNetworkIDDNSRecordsDomainParamsWithTimeout(timeout time.Duration) *GetNetworksNetworkIDDNSRecordsDomainParams { var () return &GetNetworksNetworkIDDNSRecordsDomainParams{ timeout: timeout, } } // NewGetNetworksNetworkIDDNSRecordsDomainParamsWithContext creates a new GetNetworksNetworkIDDNSRecordsDomainParams object // with the default values initialized, and the ability to set a context for a request func NewGetNetworksNetworkIDDNSRecordsDomainParamsWithContext(ctx context.Context) *GetNetworksNetworkIDDNSRecordsDomainParams
// NewGetNetworksNetworkIDDNSRecordsDomainParamsWithHTTPClient creates a new GetNetworksNetworkIDDNSRecordsDomainParams object // with the default values initialized, and the ability to set a custom HTTPClient for a request func NewGetNetworksNetworkIDDNSRecordsDomainParamsWithHTTPClient(client *http.Client) *GetNetworksNetworkIDDNSRecordsDomainParams { var () return &GetNetworksNetworkIDDNSRecordsDomainParams{ HTTPClient: client, } } /*GetNetworksNetworkIDDNSRecordsDomainParams contains all the parameters to send to the API endpoint for the get networks network ID DNS records domain operation typically these are written to a http.Request */ type GetNetworksNetworkIDDNSRecordsDomainParams struct { /*Domain DNS record domain */ Domain string /*NetworkID Network ID */ NetworkID string timeout time.Duration Context context.Context HTTPClient *http.Client } // WithTimeout adds the timeout to the get networks network ID DNS records domain params func (o *GetNetworksNetworkIDDNSRecordsDomainParams) WithTimeout(timeout time.Duration) *GetNetworksNetworkIDDNSRecordsDomainParams { o.SetTimeout(timeout) return o } // SetTimeout adds the timeout to the get networks network ID DNS records domain params func (o *GetNetworksNetworkIDDNSRecordsDomainParams) SetTimeout(timeout time.Duration) { o.timeout = timeout } // WithContext adds the context to the get networks network ID DNS records domain params func (o *GetNetworksNetworkIDDNSRecordsDomainParams) WithContext(ctx context.Context) *GetNetworksNetworkIDDNSRecordsDomainParams { o.SetContext(ctx) return o } // SetContext adds the context to the get networks network ID DNS records domain params func (o *GetNetworksNetworkIDDNSRecordsDomainParams) SetContext(ctx context.Context) { o.Context = ctx } // WithHTTPClient adds the HTTPClient to the get networks network ID DNS records domain params func (o *GetNetworksNetworkIDDNSRecordsDomainParams) WithHTTPClient(client *http.Client) *GetNetworksNetworkIDDNSRecordsDomainParams { o.SetHTTPClient(client) return o } // SetHTTPClient adds the HTTPClient to the get networks network ID DNS records domain params func (o *GetNetworksNetworkIDDNSRecordsDomainParams) SetHTTPClient(client *http.Client) { o.HTTPClient = client } // WithDomain adds the domain to the get networks network ID DNS records domain params func (o *GetNetworksNetworkIDDNSRecordsDomainParams) WithDomain(domain string) *GetNetworksNetworkIDDNSRecordsDomainParams { o.SetDomain(domain) return o } // SetDomain adds the domain to the get networks network ID DNS records domain params func (o *GetNetworksNetworkIDDNSRecordsDomainParams) SetDomain(domain string) { o.Domain = domain } // WithNetworkID adds the networkID to the get networks network ID DNS records domain params func (o *GetNetworksNetworkIDDNSRecordsDomainParams) WithNetworkID(networkID string) *GetNetworksNetworkIDDNSRecordsDomainParams { o.SetNetworkID(networkID) return o } // SetNetworkID adds the networkId to the get networks network ID DNS records domain params func (o *GetNetworksNetworkIDDNSRecordsDomainParams) SetNetworkID(networkID string) { o.NetworkID = networkID } // WriteToRequest writes these params to a swagger request func (o *GetNetworksNetworkIDDNSRecordsDomainParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { if err := r.SetTimeout(o.timeout); err != nil { return err } var res []error // path param domain if err := r.SetPathParam("domain", o.Domain); err != nil { return err } // path param network_id if err := r.SetPathParam("network_id", 
o.NetworkID); err != nil { return err } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil }
{
    var ()
    return &GetNetworksNetworkIDDNSRecordsDomainParams{
        Context: ctx,
    }
}
packet_unpacker_test.go
package quic import ( "bytes" "errors" "time" "github.com/hugefiver/quic/internal/handshake" "github.com/hugefiver/quic/internal/mocks" "github.com/hugefiver/quic/internal/protocol" "github.com/hugefiver/quic/internal/qerr" "github.com/hugefiver/quic/internal/wire" "github.com/golang/mock/gomock" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("Packet Unpacker", func() { const version = protocol.VersionTLS var ( unpacker *packetUnpacker cs *mocks.MockCryptoSetup connID = protocol.ConnectionID{0xde, 0xad, 0xbe, 0xef} payload = []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.") ) getHeader := func(extHdr *wire.ExtendedHeader) (*wire.Header, []byte) { buf := &bytes.Buffer{} ExpectWithOffset(1, extHdr.Write(buf, version)).To(Succeed()) hdrLen := buf.Len() if extHdr.Length > protocol.ByteCount(extHdr.PacketNumberLen) { buf.Write(make([]byte, int(extHdr.Length)-int(extHdr.PacketNumberLen))) } hdr, _, _, err := wire.ParsePacket(buf.Bytes(), connID.Len()) ExpectWithOffset(1, err).ToNot(HaveOccurred()) return hdr, buf.Bytes()[:hdrLen] } BeforeEach(func() { cs = mocks.NewMockCryptoSetup(mockCtrl) unpacker = newPacketUnpacker(cs, version).(*packetUnpacker) }) It("errors when the packet is too small to obtain the header decryption sample, for long headers", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{ IsLongHeader: true, Type: protocol.PacketTypeHandshake, DestConnectionID: connID, Version: version, }, PacketNumber: 1337, PacketNumberLen: protocol.PacketNumberLen2, } hdr, hdrRaw := getHeader(extHdr) data := append(hdrRaw, make([]byte, 2 /* fill up packet number */ +15 /* need 16 bytes */)...) opener := mocks.NewMockLongHeaderOpener(mockCtrl) cs.EXPECT().GetHandshakeOpener().Return(opener, nil) _, err := unpacker.Unpack(hdr, time.Now(), data) Expect(err).To(BeAssignableToTypeOf(&headerParseError{})) var headerErr *headerParseError Expect(errors.As(err, &headerErr)).To(BeTrue()) Expect(err).To(MatchError("Packet too small. Expected at least 20 bytes after the header, got 19")) }) It("errors when the packet is too small to obtain the header decryption sample, for short headers", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{DestConnectionID: connID}, PacketNumber: 1337, PacketNumberLen: protocol.PacketNumberLen2, } hdr, hdrRaw := getHeader(extHdr) data := append(hdrRaw, make([]byte, 2 /* fill up packet number */ +15 /* need 16 bytes */)...) opener := mocks.NewMockShortHeaderOpener(mockCtrl) cs.EXPECT().Get1RTTOpener().Return(opener, nil) _, err := unpacker.Unpack(hdr, time.Now(), data) Expect(err).To(BeAssignableToTypeOf(&headerParseError{})) Expect(err).To(MatchError("Packet too small. 
Expected at least 20 bytes after the header, got 19")) }) It("opens Initial packets", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{ IsLongHeader: true, Type: protocol.PacketTypeInitial, Length: 3 + 6, // packet number len + payload DestConnectionID: connID, Version: version, }, PacketNumber: 2, PacketNumberLen: 3, } hdr, hdrRaw := getHeader(extHdr) opener := mocks.NewMockLongHeaderOpener(mockCtrl) gomock.InOrder( cs.EXPECT().GetInitialOpener().Return(opener, nil), opener.EXPECT().DecryptHeader(gomock.Any(), gomock.Any(), gomock.Any()), opener.EXPECT().DecodePacketNumber(protocol.PacketNumber(2), protocol.PacketNumberLen3).Return(protocol.PacketNumber(1234)), opener.EXPECT().Open(gomock.Any(), payload, protocol.PacketNumber(1234), hdrRaw).Return([]byte("decrypted"), nil), ) packet, err := unpacker.Unpack(hdr, time.Now(), append(hdrRaw, payload...)) Expect(err).ToNot(HaveOccurred()) Expect(packet.encryptionLevel).To(Equal(protocol.EncryptionInitial)) Expect(packet.data).To(Equal([]byte("decrypted"))) }) It("opens 0-RTT packets", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{ IsLongHeader: true, Type: protocol.PacketType0RTT, Length: 3 + 6, // packet number len + payload DestConnectionID: connID, Version: version, }, PacketNumber: 20, PacketNumberLen: 2, } hdr, hdrRaw := getHeader(extHdr) opener := mocks.NewMockLongHeaderOpener(mockCtrl) gomock.InOrder( cs.EXPECT().Get0RTTOpener().Return(opener, nil), opener.EXPECT().DecryptHeader(gomock.Any(), gomock.Any(), gomock.Any()), opener.EXPECT().DecodePacketNumber(protocol.PacketNumber(20), protocol.PacketNumberLen2).Return(protocol.PacketNumber(321)), opener.EXPECT().Open(gomock.Any(), payload, protocol.PacketNumber(321), hdrRaw).Return([]byte("decrypted"), nil), ) packet, err := unpacker.Unpack(hdr, time.Now(), append(hdrRaw, payload...)) Expect(err).ToNot(HaveOccurred()) Expect(packet.encryptionLevel).To(Equal(protocol.Encryption0RTT)) Expect(packet.data).To(Equal([]byte("decrypted"))) }) It("opens short header packets", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{DestConnectionID: connID}, KeyPhase: protocol.KeyPhaseOne, PacketNumber: 99, PacketNumberLen: protocol.PacketNumberLen4, } hdr, hdrRaw := getHeader(extHdr) opener := mocks.NewMockShortHeaderOpener(mockCtrl) now := time.Now() gomock.InOrder( cs.EXPECT().Get1RTTOpener().Return(opener, nil), opener.EXPECT().DecryptHeader(gomock.Any(), gomock.Any(), gomock.Any()), opener.EXPECT().DecodePacketNumber(protocol.PacketNumber(99), protocol.PacketNumberLen4).Return(protocol.PacketNumber(321)), opener.EXPECT().Open(gomock.Any(), payload, now, protocol.PacketNumber(321), protocol.KeyPhaseOne, hdrRaw).Return([]byte("decrypted"), nil), ) packet, err := unpacker.Unpack(hdr, now, append(hdrRaw, payload...)) Expect(err).ToNot(HaveOccurred()) Expect(packet.encryptionLevel).To(Equal(protocol.Encryption1RTT)) Expect(packet.data).To(Equal([]byte("decrypted"))) }) It("returns the error when getting the sealer fails", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{DestConnectionID: connID}, PacketNumber: 0x1337, PacketNumberLen: 2, } hdr, hdrRaw := getHeader(extHdr) cs.EXPECT().Get1RTTOpener().Return(nil, handshake.ErrKeysNotYetAvailable) _, err := unpacker.Unpack(hdr, time.Now(), append(hdrRaw, payload...)) Expect(err).To(MatchError(handshake.ErrKeysNotYetAvailable)) }) It("returns the error when unpacking fails", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{ IsLongHeader: true, Type: protocol.PacketTypeHandshake, Length: 3, // 
packet number len DestConnectionID: connID, Version: version, }, PacketNumber: 2, PacketNumberLen: 3, } hdr, hdrRaw := getHeader(extHdr) opener := mocks.NewMockLongHeaderOpener(mockCtrl) cs.EXPECT().GetHandshakeOpener().Return(opener, nil) opener.EXPECT().DecryptHeader(gomock.Any(), gomock.Any(), gomock.Any()) opener.EXPECT().DecodePacketNumber(gomock.Any(), gomock.Any()) unpackErr := &qerr.TransportError{ErrorCode: qerr.CryptoBufferExceeded} opener.EXPECT().Open(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, unpackErr) _, err := unpacker.Unpack(hdr, time.Now(), append(hdrRaw, payload...)) Expect(err).To(MatchError(unpackErr)) }) It("defends against the timing side-channel when the reserved bits are wrong, for long header packets", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{ IsLongHeader: true, Type: protocol.PacketTypeHandshake, DestConnectionID: connID, Version: version, }, PacketNumber: 0x1337, PacketNumberLen: 2, } hdr, hdrRaw := getHeader(extHdr) hdrRaw[0] |= 0xc opener := mocks.NewMockLongHeaderOpener(mockCtrl) opener.EXPECT().DecryptHeader(gomock.Any(), gomock.Any(), gomock.Any()) cs.EXPECT().GetHandshakeOpener().Return(opener, nil) opener.EXPECT().DecodePacketNumber(gomock.Any(), gomock.Any()) opener.EXPECT().Open(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("payload"), nil) _, err := unpacker.Unpack(hdr, time.Now(), append(hdrRaw, payload...)) Expect(err).To(MatchError(wire.ErrInvalidReservedBits)) }) It("defends against the timing side-channel when the reserved bits are wrong, for short header packets", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{DestConnectionID: connID}, PacketNumber: 0x1337, PacketNumberLen: 2, } hdr, hdrRaw := getHeader(extHdr) hdrRaw[0] |= 0x18 opener := mocks.NewMockShortHeaderOpener(mockCtrl) opener.EXPECT().DecryptHeader(gomock.Any(), gomock.Any(), gomock.Any()) cs.EXPECT().Get1RTTOpener().Return(opener, nil) opener.EXPECT().DecodePacketNumber(gomock.Any(), gomock.Any()) opener.EXPECT().Open(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("payload"), nil) _, err := unpacker.Unpack(hdr, time.Now(), append(hdrRaw, payload...)) Expect(err).To(MatchError(wire.ErrInvalidReservedBits)) }) It("returns the decryption error, when unpacking a packet with wrong reserved bits fails", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{DestConnectionID: connID}, PacketNumber: 0x1337, PacketNumberLen: 2, } hdr, hdrRaw := getHeader(extHdr) hdrRaw[0] |= 0x18 opener := mocks.NewMockShortHeaderOpener(mockCtrl) opener.EXPECT().DecryptHeader(gomock.Any(), gomock.Any(), gomock.Any()) cs.EXPECT().Get1RTTOpener().Return(opener, nil) opener.EXPECT().DecodePacketNumber(gomock.Any(), gomock.Any()) opener.EXPECT().Open(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, handshake.ErrDecryptionFailed) _, err := unpacker.Unpack(hdr, time.Now(), append(hdrRaw, payload...))
Expect(err).To(MatchError(handshake.ErrDecryptionFailed)) }) It("decrypts the header", func() { extHdr := &wire.ExtendedHeader{ Header: wire.Header{ IsLongHeader: true, Type: protocol.PacketTypeHandshake, Length: 3, // packet number len DestConnectionID: connID, Version: version, }, PacketNumber: 0x1337, PacketNumberLen: 2, } hdr, hdrRaw := getHeader(extHdr) origHdrRaw := append([]byte{}, hdrRaw...) // save a copy of the header firstHdrByte := hdrRaw[0] hdrRaw[0] ^= 0xff // invert the first byte hdrRaw[len(hdrRaw)-2] ^= 0xff // invert the packet number hdrRaw[len(hdrRaw)-1] ^= 0xff // invert the packet number Expect(hdrRaw[0]).ToNot(Equal(firstHdrByte)) opener := mocks.NewMockLongHeaderOpener(mockCtrl) cs.EXPECT().GetHandshakeOpener().Return(opener, nil) gomock.InOrder( // we're using a 2 byte packet number, so the sample starts at the 3rd payload byte opener.EXPECT().DecryptHeader( []byte{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}, &hdrRaw[0], append(hdrRaw[len(hdrRaw)-2:], []byte{1, 2}...)).Do(func(_ []byte, firstByte *byte, pnBytes []byte) { *firstByte ^= 0xff // invert the first byte back for i := range pnBytes { pnBytes[i] ^= 0xff // invert the packet number bytes } }), opener.EXPECT().DecodePacketNumber(protocol.PacketNumber(0x1337), protocol.PacketNumberLen2).Return(protocol.PacketNumber(0x7331)), opener.EXPECT().Open(gomock.Any(), gomock.Any(), protocol.PacketNumber(0x7331), origHdrRaw).Return([]byte{0}, nil), ) data := hdrRaw for i := 1; i <= 100; i++ { data = append(data, uint8(i)) } packet, err := unpacker.Unpack(hdr, time.Now(), data) Expect(err).ToNot(HaveOccurred()) Expect(packet.packetNumber).To(Equal(protocol.PacketNumber(0x7331))) }) })
meta.py
# Copyright 2019 TerraPower, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Metadata describing an ARMI distribution. """ # duplicating with setup.py for now. This is because in order to import meta.py, we
# need to run armi.__init__, which does a whole heck of a lot of stuff that setup.py
# shouldn't need. We should clean this up in the future.

__version__ = "0.2.0"
add-cluster.component.ts
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. import { Component, OnInit, OnDestroy } from '@angular/core'; import { Subscription } from 'rxjs'; import { take } from 'rxjs/operators'; import { clearEmptyField, StackBase } from '@app/core/types'; import { BaseFormDirective } from './base-form.directive'; import { ICluster } from '@app/models/cluster'; @Component({ selector: 'app-add-cluster', template: ` <ng-container [formGroup]="form"> <app-bundles [form]="form" [typeName]="'cluster'" (prototypeChanged)="prototypeChanged($event)"></app-bundles> <app-input [form]="form" [label]="'Cluster name'" [controlName]="'name'" [isRequired]="true"></app-input> <app-input [form]="form" [label]="'Description'" [controlName]="'description'"></app-input> <app-add-controls [disabled]="!form.valid" (cancel)="onCancel()" (save)="save()"></app-add-controls> </ng-container> `, }) export class
extends BaseFormDirective implements OnInit, OnDestroy { sgn: Subscription; private prototype: StackBase; ngOnInit() { this.form = this.service.model('cluster').form; this.sgn = this.service.genName(this.form); } ngOnDestroy() { this.sgn.unsubscribe(); } prototypeChanged(event: StackBase) { this.prototype = event; } save() { const data = clearEmptyField(this.form.value); this.service .add<ICluster>(data, 'cluster', this.prototype) .pipe(take(1)) .subscribe((_) => this.onCancel()); } }
AddClusterComponent
localenv.rs
use lazy_static::lazy_static; use log::debug; use std::path::Path; use std::{env, fs}; use serde_derive::*; use toml; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); /// Defines and parses CLI argument for this server. pub fn parse_cli_args<'a>() -> clap::ArgMatches<'a> { clap::App::new("qaruntime-rs") .version(VERSION) .arg( clap::Arg::with_name("config") .required(false) .help("Path to configuration file") .index(1), ) .get_matches() } /// Parses CLI arguments, finds location of config file, and parses config file into a struct. pub fn parse_config_from_cli_args(matches: &clap::ArgMatches) -> Config { let conf = match matches.value_of("config") { Some(config_path) => match Config::from_file(config_path) { Ok(config) => config, Err(msg) => { eprintln!("Failed to parse config file {}: {}", config_path, msg); std::process::exit(1); } }, None => { eprintln!("No config file specified, append toml file"); std::process::exit(1); } }; conf } #[derive(Clone, Debug, Default, Deserialize)] pub struct Config { pub clickhouse: ClickhouseConfig, pub account: MongoConfig, pub hisdata: MongoConfig, pub order: MQConfig, pub realtime: MQConfig, pub redis: RedisConfig, pub accsetup: AccountConfig, pub cli: Cli, pub backtest: Backtest, pub instruct: MQConfig, pub ack: MQConfig, } impl Config { /// Read configuration from a file into a new Config struct. pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, String> { let path = path.as_ref(); debug!("Reading configuration from {}", path.display()); let data = match fs::read_to_string(path) { Ok(data) => data, Err(err) => return Err(err.to_string()), }; let conf: Config = match toml::from_str(&data) { Ok(conf) => conf, Err(err) => return Err(err.to_string()), }; Ok(conf) } } #[derive(Clone, Debug, Deserialize)] #[serde(default)] pub struct MongoConfig { pub uri: String, pub db: String, } impl Default for MongoConfig { fn default() -> Self { Self { uri: "mongodb://localhost:27017".to_owned(), db: "quantaxis".to_owned(), } }
pub struct MQConfig { pub uri: String, pub exchange: String, pub routing_key: String, } impl Default for MQConfig { fn default() -> Self { Self { uri: "amqp://admin:admin@localhost:5672/".to_owned(), exchange: "".to_owned(), routing_key: "default".to_owned(), } } } #[derive(Clone, Debug, Deserialize)] #[serde(default)] pub struct RedisConfig { pub uri: String, } impl Default for RedisConfig { fn default() -> Self { Self { uri: "redis://localhost:6379".to_owned(), } } } #[derive(Clone, Debug, Deserialize)] #[serde(default)] pub struct ClickhouseConfig { pub uri: String, } impl Default for ClickhouseConfig { fn default() -> Self { Self { uri: "tcp://[email protected]:9000?compression=lz4&ping_timeout=42ms".to_owned(), } } } #[derive(Clone, Debug, Deserialize)] #[serde(default)] pub struct AccountConfig { pub cash_map: String, pub multiply: f64, pub default: f64, pub symbol: String, } impl Default for AccountConfig { fn default() -> Self { Self { cash_map: "{}".to_owned(), multiply: 2.0, default: 50000.0, symbol: "KTKS".to_owned(), } } } #[derive(Clone, Debug, Deserialize)] #[serde(default)] pub struct Cli { pub name: Vec<String>, pub codes: Vec<String>, pub freqs: Vec<String>, pub params: String, pub log_path: String, } impl Default for Cli { fn default() -> Self { Self { name: vec!["t00".to_owned()], codes: vec!["rb2010".to_owned()], freqs: vec!["1min".to_owned()], params: "{\"\":\"\"}".to_owned(), log_path: "log/qaruntime.log".to_owned(), } } } #[derive(Clone, Debug, Deserialize)] #[serde(default)] pub struct Backtest { pub start: String, } impl Default for Backtest { fn default() -> Self { Self { start: "2020-01-01 09:00:00".to_owned(), } } } pub fn new_config() -> Config { let _args: Vec<String> = env::args().collect(); let cfg: Config = parse_config_from_cli_args(&parse_cli_args()); cfg } lazy_static! { pub static ref CONFIG: Config = new_config(); }
}

#[derive(Clone, Debug, Deserialize)]
#[serde(default)]
cifar.py
from __future__ import print_function from PIL import Image import os import os.path import numpy as np import sys import pickle import torch import torch.utils.data as data from itertools import permutations class VisionDataset(data.Dataset): _repr_indent = 4 def __init__(self, root, transforms=None, transform=None, target_transform=None): if isinstance(root, torch._six.string_classes): root = os.path.expanduser(root) self.root = root has_transforms = transforms is not None has_separate_transform = transform is not None or target_transform is not None if has_transforms and has_separate_transform: raise ValueError("Only transforms or transform/target_transform can " "be passed as argument") # for backwards-compatibility self.transform = transform self.target_transform = target_transform if has_separate_transform: transforms = StandardTransform(transform, target_transform) self.transforms = transforms def __getitem__(self, index): raise NotImplementedError def __len__(self): raise NotImplementedError def __repr__(self): head = "Dataset " + self.__class__.__name__ body = ["Number of datapoints: {}".format(self.__len__())] if self.root is not None: body.append("Root location: {}".format(self.root)) body += self.extra_repr().splitlines() if self.transforms is not None: body += [repr(self.transforms)] lines = [head] + [" " * self._repr_indent + line for line in body] return '\n'.join(lines) def _format_transform_repr(self, transform, head): lines = transform.__repr__().splitlines() return (["{}{}".format(head, lines[0])] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]) def extra_repr(self): return "" class CIFAR10(VisionDataset): base_folder = 'cifar-10-batches-py' url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" filename = "cifar-10-python.tar.gz" tgz_md5 = 'c58f30108f718f92721af3b95e74349a' train_list = [ ['data_batch_1', 'c99cafc152244af753f735de768cd75f'], ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], ['data_batch_4', '634d18415352ddfa80567beed471001a'], ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'], ] test_list = [ ['test_batch', '40351d587109b95175f43aff81a1287e'], ] meta = { 'filename': 'batches.meta', 'key': 'label_names', 'md5': '5ff9c542aee3614f3951f8cda6e48888', } def __init__(self, root, train=True, transform=None, download=False): super(CIFAR10, self).__init__(root) self.transform = transform self.train = train # training set or test set if download: raise ValueError('cannot download.') exit() #self.download() #if not self._check_integrity(): # raise RuntimeError('Dataset not found or corrupted.' 
+ # ' You can use download=True to download it') if self.train: downloaded_list = self.train_list else: downloaded_list = self.test_list self.data = [] self.targets = [] # now load the picked numpy arrays for file_name, checksum in downloaded_list: file_path = os.path.join(self.root, self.base_folder, file_name) with open(file_path, 'rb') as f: if sys.version_info[0] == 2: entry = pickle.load(f) else: entry = pickle.load(f, encoding='latin1') self.data.append(entry['data']) if 'labels' in entry: self.targets.extend(entry['labels']) else: self.targets.extend(entry['fine_labels']) self.data = np.vstack(self.data).reshape(-1, 3, 32, 32) self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC self._load_meta() def _load_meta(self): path = os.path.join(self.root, self.base_folder, self.meta['filename']) #if not check_integrity(path, self.meta['md5']): # raise RuntimeError('Dataset metadata file not found or corrupted.' + # ' You can use download=True to download it') with open(path, 'rb') as infile: if sys.version_info[0] == 2: data = pickle.load(infile) else: data = pickle.load(infile, encoding='latin1') self.classes = data[self.meta['key']] self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)} def __getitem__(self, index): img, target = self.data[index], self.targets[index] if self.train: if np.random.rand() < 0.5: img = img[:,::-1,:] img0 = np.rot90(img, 0).copy() img0 = Image.fromarray(img0) img0 = self.transform(img0) img1 = np.rot90(img, 1).copy() img1 = Image.fromarray(img1) img1 = self.transform(img1) img2 = np.rot90(img, 2).copy() img2 = Image.fromarray(img2) img2 = self.transform(img2) img3 = np.rot90(img, 3).copy() img3 = Image.fromarray(img3) img3 = self.transform(img3) img = torch.stack([img0,img1,img2,img3]) return img, target def __len__(self): return len(self.data) def _check_integrity(self): root = self.root for fentry in (self.train_list + self.test_list): filename, md5 = fentry[0], fentry[1] fpath = os.path.join(root, self.base_folder, filename) if not check_integrity(fpath, md5): return False return True def
(self): import tarfile if self._check_integrity(): print('Files already downloaded and verified') return download_url(self.url, self.root, self.filename, self.tgz_md5) # extract file with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar: tar.extractall(path=self.root) def extra_repr(self): return "Split: {}".format("Train" if self.train is True else "Test") class CIFAR100(CIFAR10): """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. This is a subclass of the `CIFAR10` Dataset. """ base_folder = 'cifar-100-python' url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz" filename = "cifar-100-python.tar.gz" tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' train_list = [ ['train', '16019d7e3df5f24257cddd939b257f8d'], ] test_list = [ ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'], ] meta = { 'filename': 'meta', 'key': 'fine_label_names', 'md5': '7973b15100ade9c7d40fb424638fde48', }
download
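A short usage sketch for the rotation-stacking CIFAR10 class above (added for illustration; it assumes the extracted cifar-10-batches-py folder already sits under ./data, since download is disabled in this copy, and that torchvision is installed for ToTensor). The training branch of __getitem__ stacks all four rotations of each image, so batches gain an extra rotation axis:

import torch
import torchvision.transforms as T

# transform is applied to each of the four rotated copies of an image.
train_set = CIFAR10(root='./data', train=True, transform=T.ToTensor())
loader = torch.utils.data.DataLoader(train_set, batch_size=8, shuffle=True)

imgs, targets = next(iter(loader))
print(imgs.shape)     # torch.Size([8, 4, 3, 32, 32]) -- batch, rotation, C, H, W
print(targets.shape)  # torch.Size([8])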
api_interfaces.go
package api2go import ( "net/http" "github.com/ejdem86/api2go/jsonapi" ) // The ResourceGetter interface MUST be implemented in order to generate the single GET route and related type ResourceGetter interface { // FindOne returns an object by its ID // Possible Responder success status code 200 FindOne(ID string, req Request) (Responder, error) } // The CRUD interface embed all interfaces at once: `ResourceCreator`, `ResourceDeleter`, `ResourceUpdater` (which includes `ResourceGetter`) type CRUD interface { ResourceCreator ResourceDeleter ResourceUpdater } // The ResourceCreator interface MUST be implemented in order to generate the POST route type ResourceCreator interface { // Create a new object. Newly created object/struct must be in Responder. // Possible Responder status codes are: // - 201 Created: Resource was created and needs to be returned // - 202 Accepted: Processing is delayed, return nothing // - 204 No Content: Resource created with a client generated ID, and no fields were modified by // the server Create(obj interface{}, req Request) (Responder, error) } // The ResourceDeleter interface MUST be implemented in order to generate the DELETE route type ResourceDeleter interface { // Delete an object // Possible Responder status codes are: // - 200 OK: Deletion was a success, returns meta information, currently not implemented! Do not use this // - 202 Accepted: Processing is delayed, return nothing // - 204 No Content: Deletion was successful, return nothing Delete(id string, req Request) (Responder, error) } // The ResourceUpdater interface MUST be implemented in order to generate the PATCH/PUT routes type ResourceUpdater interface { // ResourceGetter must be implemented along with ResourceUpdater so that api2go can retrieve the single resource before update ResourceGetter // Update an object // Possible Responder status codes are: // - 200 OK: Update successful, however some field(s) were changed, returns updates source // - 202 Accepted: Processing is delayed, return nothing // - 204 No Content: Update was successful, no fields were changed by the server, return nothing Update(obj interface{}, req Request) (Responder, error) } // Pagination represents information needed to return pagination links
Prev map[string]string First map[string]string Last map[string]string } // The PaginatedFindAll interface can be optionally implemented to fetch a subset of all records. // Pagination query parameters must be used to limit the result. Pagination URLs will automatically // be generated by the api. You can use a combination of the following 2 query parameters: // page[number] AND page[size] // OR page[offset] AND page[limit] type PaginatedFindAll interface { PaginatedFindAll(req Request) (totalCount uint, response Responder, err error) } // The FindAll interface can be optionally implemented to fetch all records at once. type FindAll interface { // FindAll returns all objects FindAll(req Request) (Responder, error) } // The ObjectInitializer interface can be implemented to have the ability to change // a created object before Unmarshal is called. This is currently only called on // Create as the other actions go through FindOne or FindAll which are already // controlled by the implementer. type ObjectInitializer interface { InitializeObject(interface{}) } //URLResolver allows you to implement a static //way to return a baseURL for all incoming //requests for one api2go instance. type URLResolver interface { GetBaseURL() string } // RequestAwareURLResolver allows you to dynamically change // generated urls. // // This is particulary useful if you have the same // API answering to multiple domains, or subdomains // e.g customer[1,2,3,4].yourapi.example.com // // SetRequest will always be called prior to // the GetBaseURL() from `URLResolver` so you // have to change the result value based on the last // request. type RequestAwareURLResolver interface { URLResolver SetRequest(http.Request) } // The Responder interface is used by all Resource Methods as a container for the Response. // Metadata is additional Metadata. You can put anything you like into it, see jsonapi spec. // Result returns the actual payload. For FindOne, put only one entry in it. // StatusCode sets the http status code. type Responder interface { Metadata() map[string]interface{} Result() interface{} StatusCode() int } // The LinksResponder interface may be used when the response object is able to return // a set of links for the top-level response object. type LinksResponder interface { Responder Links(*http.Request, string) jsonapi.Links }
type Pagination struct {
    Next map[string]string
auth.js
'use strict';

const { User } = require('../../../models'),
  { secret_jwt } = require('../../../config/server'),
  jwt = require('jsonwebtoken')

module.exports = {
  signin: async (bodyData) => {
error: true, data: { message: 'Email not found.' } } } else { if (User.compareHash(user, bodyData.password)) { // Authenticated return { error: false, data: { user: { id: user.id, email: user.email } } } } else { console.log(`Wrong password with ${user.email}.`) return { error: true, data: { message: `Wrong password with ${user.email}` } } } } }, getUserFromToken(token) { const cleanTkn = token.split(' ')[1] return jwt.decode(cleanTkn).user } }
const user = await User.findOne({ where: { email: bodyData.email }, raw: true })
if (!user) {
  console.log('User not found.')
  return {
kind.rs
use std::fmt; use crate::Status; /// Trait alias for types that programmatically specify the status. /// /// For prototyping, see [`Unkind`]. /// /// # Example /// /// ```rust /// use status::Kind; /// /// #[derive(Copy, Clone, Debug, derive_more::Display)] /// enum ErrorKind { /// #[display(fmt = "Failed to read file")] /// Read, /// #[display(fmt = "Failed to parse")] /// Parse, /// } /// type Status = status::Status<ErrorKind>; /// type Result<T, E = Status> = std::result::Result<T, E>; /// /// fn read_file() -> Result<()> { /// return ErrorKind::Read.into_err(); /// } /// ``` pub trait Kind: Copy + Clone + fmt::Display + fmt::Debug + Send + Sync + 'static { /// Convenience for creating an error. fn
<C: crate::Context>(self) -> Status<Self, C> { Status::new(self) } /// Convenience for returning an error. fn into_err<T, C: crate::Context>(self) -> Result<T, Status<Self, C>> { Err(Status::new(self)) } } impl<U> Kind for U where U: Copy + Clone + fmt::Display + fmt::Debug + Send + Sync + 'static {} /// Adhoc [`Kind`]. /// /// Unlike most [`Kind`]s, this is meant to be opaque and not programmatically specify the status. /// It is only good for displaying a `str` to the user when prototyping before one transitions to more formal [`Kind`]s. /// /// Note: This is the default [`Kind`] for [`Status`]. /// /// When transitioning to a more useful [`Kind`], it could be helpful to have an `enum` variant /// with an `Unkind`: /// ``` /// #[derive(Copy, Clone, Debug, derive_more::Display)] /// enum ErrorKind { /// #[display(fmt = "Failed to read file")] /// Read, /// #[display(fmt = "Failed to parse")] /// Parse, /// #[display(fmt = "{}", "_0")] /// Other(status::Unkind), /// } /// ``` #[derive(Copy, Clone, Debug)] pub struct Unkind { inner: &'static str, } impl From<&'static str> for Unkind { fn from(s: &'static str) -> Self { Self { inner: s } } } impl fmt::Display for Unkind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "{}", self.inner) } } #[cfg(test)] mod test { use super::*; use static_assertions::*; #[test] fn unkind() { assert_impl_all!(Unkind: Kind); } }
into_status
__init__.py
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation


class ProxyGenericForeignKey(GenericForeignKey):
    def __init__(self, *args, **kwargs):
class ProxyGenericRelation(GenericRelation): def __init__(self, *args, **kwargs): kwargs['for_concrete_model'] = False super(ProxyGenericRelation, self).__init__(*args, **kwargs) class WidgyGenericForeignKey(ProxyGenericForeignKey): def __get__(self, instance, instance_type=None): try: return super(WidgyGenericForeignKey, self).__get__(instance, instance_type) except AttributeError: # The model for this content type couldn't be loaded. Use an # UnknownWidget instead. from widgy.models import UnknownWidget ret = UnknownWidget(getattr(instance, self.ct_field), getattr(instance, self.fk_field), instance) ret.node = instance ret.warn() return ret
kwargs['for_concrete_model'] = False
super(ProxyGenericForeignKey, self).__init__(*args, **kwargs)
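A hedged sketch of how these proxy fields are typically wired into a model (the model and field names below are illustrative, not taken from widgy itself): WidgyGenericForeignKey is declared exactly like Django's stock GenericForeignKey, and the for_concrete_model=False override plus the UnknownWidget fallback happen transparently inside the field.

from django.contrib.contenttypes.models import ContentType
from django.db import models

# Illustrative model only; the real import path of WidgyGenericForeignKey
# depends on where this __init__.py lives inside the widgy package.
class NodeLike(models.Model):
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    content_id = models.PositiveIntegerField()
    content = WidgyGenericForeignKey('content_type', 'content_id')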
error.rs
//! Error and Result module use std::{error::Error as StdError, fmt, io, str::Utf8Error, string::FromUtf8Error}; use derive_more::{Display, Error, From}; use http::{uri::InvalidUri, StatusCode}; use crate::{ body::{AnyBody, Body}, ws, Response, }; pub use http::Error as HttpError; pub struct Error { inner: Box<ErrorInner>, } pub(crate) struct ErrorInner { #[allow(dead_code)] kind: Kind, cause: Option<Box<dyn StdError>>, } impl Error { fn new(kind: Kind) -> Self { Self { inner: Box::new(ErrorInner { kind, cause: None }), } } pub(crate) fn new_http() -> Self { Self::new(Kind::Http) } pub(crate) fn new_parse() -> Self { Self::new(Kind::Parse) } pub(crate) fn new_payload() -> Self { Self::new(Kind::Payload) } pub(crate) fn new_body() -> Self { Self::new(Kind::Body) } pub(crate) fn new_send_response() -> Self { Self::new(Kind::SendResponse) } // TODO: remove allow #[allow(dead_code)] pub(crate) fn new_io() -> Self { Self::new(Kind::Io) } // used in encoder behind feature flag so ignore unused warning #[allow(unused)] pub(crate) fn new_encoder() -> Self { Self::new(Kind::Encoder) } pub(crate) fn new_ws() -> Self { Self::new(Kind::Ws) } pub(crate) fn with_cause(mut self, cause: impl Into<Box<dyn StdError>>) -> Self { self.inner.cause = Some(cause.into()); self } } impl From<Error> for Response<AnyBody> { fn from(err: Error) -> Self { let status_code = match err.inner.kind { Kind::Parse => StatusCode::BAD_REQUEST, _ => StatusCode::INTERNAL_SERVER_ERROR, }; Response::new(status_code).set_body(Body::from(err.to_string())) } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Display)] pub enum Kind { #[display(fmt = "error processing HTTP")] Http, #[display(fmt = "error parsing HTTP message")] Parse, #[display(fmt = "request payload read error")] Payload, #[display(fmt = "response body write error")] Body, #[display(fmt = "send response error")] SendResponse, #[display(fmt = "error in WebSocket process")] Ws, #[display(fmt = "connection error")] Io, #[display(fmt = "encoder error")] Encoder, } impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // TODO: more detail f.write_str("actix_http::Error") } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.inner.cause.as_ref() { Some(err) => write!(f, "{}: {}", &self.inner.kind, err), None => write!(f, "{}", &self.inner.kind), } } } impl StdError for Error { fn source(&self) -> Option<&(dyn StdError + 'static)> { self.inner.cause.as_ref().map(Box::as_ref) } } impl From<std::convert::Infallible> for Error { fn from(err: std::convert::Infallible) -> Self { match err {} } } impl From<ws::ProtocolError> for Error { fn from(err: ws::ProtocolError) -> Self { Self::new_ws().with_cause(err) } } impl From<HttpError> for Error { fn from(err: HttpError) -> Self { Self::new_http().with_cause(err) } } impl From<ws::HandshakeError> for Error { fn from(err: ws::HandshakeError) -> Self { Self::new_ws().with_cause(err) } } /// A set of errors that can occur during parsing HTTP streams. #[derive(Debug, Display, Error)] #[non_exhaustive] pub enum ParseError { /// An invalid `Method`, such as `GE.T`. #[display(fmt = "Invalid Method specified")] Method, /// An invalid `Uri`, such as `exam ple.domain`. #[display(fmt = "Uri error: {}", _0)] Uri(InvalidUri), /// An invalid `HttpVersion`, such as `HTP/1.1` #[display(fmt = "Invalid HTTP version specified")] Version, /// An invalid `Header`. #[display(fmt = "Invalid Header provided")] Header, /// A message head is too large to be reasonable. 
#[display(fmt = "Message head is too large")] TooLarge, /// A message reached EOF, but is not complete. #[display(fmt = "Message is incomplete")] Incomplete, /// An invalid `Status`, such as `1337 ELITE`. #[display(fmt = "Invalid Status provided")] Status, /// A timeout occurred waiting for an IO event. #[allow(dead_code)] #[display(fmt = "Timeout")] Timeout, /// An `io::Error` that occurred while trying to read or write to a network stream. #[display(fmt = "IO error: {}", _0)] Io(io::Error), /// Parsing a field as string failed. #[display(fmt = "UTF8 error: {}", _0)] Utf8(Utf8Error), } impl From<io::Error> for ParseError { fn from(err: io::Error) -> ParseError { ParseError::Io(err) } } impl From<InvalidUri> for ParseError { fn from(err: InvalidUri) -> ParseError { ParseError::Uri(err) } } impl From<Utf8Error> for ParseError { fn from(err: Utf8Error) -> ParseError { ParseError::Utf8(err) } } impl From<FromUtf8Error> for ParseError { fn from(err: FromUtf8Error) -> ParseError { ParseError::Utf8(err.utf8_error()) } } impl From<httparse::Error> for ParseError { fn from(err: httparse::Error) -> ParseError { match err { httparse::Error::HeaderName | httparse::Error::HeaderValue | httparse::Error::NewLine | httparse::Error::Token => ParseError::Header, httparse::Error::Status => ParseError::Status, httparse::Error::TooManyHeaders => ParseError::TooLarge, httparse::Error::Version => ParseError::Version, } } } impl From<ParseError> for Error { fn from(err: ParseError) -> Self
} impl From<ParseError> for Response<AnyBody> { fn from(err: ParseError) -> Self { Error::from(err).into() } } /// A set of errors that can occur running blocking tasks in thread pool. #[derive(Debug, Display, Error)] #[display(fmt = "Blocking thread pool is gone")] pub struct BlockingError; /// A set of errors that can occur during payload parsing. #[derive(Debug, Display)] #[non_exhaustive] pub enum PayloadError { /// A payload reached EOF, but is not complete. #[display( fmt = "A payload reached EOF, but is not complete. Inner error: {:?}", _0 )] Incomplete(Option<io::Error>), /// Content encoding stream corruption. #[display(fmt = "Can not decode content-encoding.")] EncodingCorrupted, /// Payload reached size limit. #[display(fmt = "Payload reached size limit.")] Overflow, /// Payload length is unknown. #[display(fmt = "Payload length is unknown.")] UnknownLength, /// HTTP/2 payload error. #[display(fmt = "{}", _0)] Http2Payload(h2::Error), /// Generic I/O error. #[display(fmt = "{}", _0)] Io(io::Error), } impl std::error::Error for PayloadError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { PayloadError::Incomplete(None) => None, PayloadError::Incomplete(Some(err)) => Some(err as &dyn std::error::Error), PayloadError::EncodingCorrupted => None, PayloadError::Overflow => None, PayloadError::UnknownLength => None, PayloadError::Http2Payload(err) => Some(err as &dyn std::error::Error), PayloadError::Io(err) => Some(err as &dyn std::error::Error), } } } impl From<h2::Error> for PayloadError { fn from(err: h2::Error) -> Self { PayloadError::Http2Payload(err) } } impl From<Option<io::Error>> for PayloadError { fn from(err: Option<io::Error>) -> Self { PayloadError::Incomplete(err) } } impl From<io::Error> for PayloadError { fn from(err: io::Error) -> Self { PayloadError::Incomplete(Some(err)) } } impl From<BlockingError> for PayloadError { fn from(_: BlockingError) -> Self { PayloadError::Io(io::Error::new( io::ErrorKind::Other, "Operation is canceled", )) } } impl From<PayloadError> for Error { fn from(err: PayloadError) -> Self { Self::new_payload().with_cause(err) } } /// A set of errors that can occur during dispatching HTTP requests. #[derive(Debug, Display, Error, From)] #[non_exhaustive] pub enum DispatchError { /// Service error // FIXME: display and error type #[display(fmt = "Service Error")] Service(#[error(not(source))] Response<AnyBody>), /// Body error // FIXME: display and error type #[display(fmt = "Body Error")] Body(#[error(not(source))] Box<dyn StdError>), /// Upgrade service error Upgrade, /// An `io::Error` that occurred while trying to read or write to a network stream. #[display(fmt = "IO error: {}", _0)] Io(io::Error), /// Http request parse error. #[display(fmt = "Parse error: {}", _0)] Parse(ParseError), /// Http/2 error #[display(fmt = "{}", _0)] H2(h2::Error), /// The first request did not complete within the specified timeout. #[display(fmt = "The first request did not complete within the specified timeout")] SlowRequestTimeout, /// Disconnect timeout. Makes sense for ssl streams. 
#[display(fmt = "Connection shutdown timeout")] DisconnectTimeout, /// Payload is not consumed #[display(fmt = "Task is completed but request's payload is not consumed")] PayloadIsNotConsumed, /// Malformed request #[display(fmt = "Malformed request")] MalformedRequest, /// Internal error #[display(fmt = "Internal error")] InternalError, /// Unknown error #[display(fmt = "Unknown error")] Unknown, } /// A set of error that can occur during parsing content type. #[derive(Debug, Display, Error)] #[non_exhaustive] pub enum ContentTypeError { /// Can not parse content type #[display(fmt = "Can not parse content type")] ParseError, /// Unknown content encoding #[display(fmt = "Unknown content encoding")] UnknownEncoding, } #[cfg(test)] mod content_type_test_impls { use super::*; impl std::cmp::PartialEq for ContentTypeError { fn eq(&self, other: &Self) -> bool { match self { Self::ParseError => matches!(other, ContentTypeError::ParseError), Self::UnknownEncoding => { matches!(other, ContentTypeError::UnknownEncoding) } } } } } #[cfg(test)] mod tests { use super::*; use http::{Error as HttpError, StatusCode}; use std::io; #[test] fn test_into_response() { let resp: Response<AnyBody> = ParseError::Incomplete.into(); assert_eq!(resp.status(), StatusCode::BAD_REQUEST); let err: HttpError = StatusCode::from_u16(10000).err().unwrap().into(); let resp: Response<AnyBody> = Error::new_http().with_cause(err).into(); assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR); } #[test] fn test_as_response() { let orig = io::Error::new(io::ErrorKind::Other, "other"); let err: Error = ParseError::Io(orig).into(); assert_eq!( format!("{}", err), "error parsing HTTP message: IO error: other" ); } #[test] fn test_error_display() { let orig = io::Error::new(io::ErrorKind::Other, "other"); let err = Error::new_io().with_cause(orig); assert_eq!("connection error: other", err.to_string()); } #[test] fn test_error_http_response() { let orig = io::Error::new(io::ErrorKind::Other, "other"); let err = Error::new_io().with_cause(orig); let resp: Response<AnyBody> = err.into(); assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR); } #[test] fn test_payload_error() { let err: PayloadError = io::Error::new(io::ErrorKind::Other, "ParseError").into(); assert!(err.to_string().contains("ParseError")); let err = PayloadError::Incomplete(None); assert_eq!( err.to_string(), "A payload reached EOF, but is not complete. Inner error: None" ); } macro_rules! from { ($from:expr => $error:pat) => { match ParseError::from($from) { err @ $error => { assert!(err.to_string().len() >= 5); } err => unreachable!("{:?}", err), } }; } macro_rules! from_and_cause { ($from:expr => $error:pat) => { match ParseError::from($from) { e @ $error => { let desc = format!("{}", e); assert_eq!(desc, format!("IO error: {}", $from)); } _ => unreachable!("{:?}", $from), } }; } #[test] fn test_from() { from_and_cause!(io::Error::new(io::ErrorKind::Other, "other") => ParseError::Io(..)); from!(httparse::Error::HeaderName => ParseError::Header); from!(httparse::Error::HeaderName => ParseError::Header); from!(httparse::Error::HeaderValue => ParseError::Header); from!(httparse::Error::NewLine => ParseError::Header); from!(httparse::Error::Status => ParseError::Status); from!(httparse::Error::Token => ParseError::Header); from!(httparse::Error::TooManyHeaders => ParseError::TooLarge); from!(httparse::Error::Version => ParseError::Version); } }
{
    Self::new_parse().with_cause(err)
}
matrix_scfg.rs
#[doc = "Reader of register MATRIX_SCFG[%s]"] pub type R = crate::R<u32, super::MATRIX_SCFG>; #[doc = "Writer for register MATRIX_SCFG[%s]"] pub type W = crate::W<u32, super::MATRIX_SCFG>; #[doc = "Register MATRIX_SCFG[%s] `reset()`'s with value 0"] impl crate::ResetValue for super::MATRIX_SCFG { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `SLOT_CYCLE`"] pub type SLOT_CYCLE_R = crate::R<u16, u16>; #[doc = "Write proxy for field `SLOT_CYCLE`"] pub struct SLOT_CYCLE_W<'a> { w: &'a mut W, } impl<'a> SLOT_CYCLE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !0x01ff) | ((value as u32) & 0x01ff); self.w } } #[doc = "Default Master Type\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DEFMSTR_TYPE_A { #[doc = "0: No Default Master-At the end of the current slave access, if no other master request is pending, the slave is disconnected from all masters.This results in a one clock cycle latency for the first access of a burst transfer or for a single access."] NONE, #[doc = "1: Last Default Master-At the end of the current slave access, if no other master request is pending, the slave stays connected to the last master having accessed it.This results in not having one clock cycle latency when the last master tries to access the slave again."] LAST, #[doc = "2: Fixed Default Master-At the end of the current slave access, if no other master request is pending, the slave connects to the fixed master the number that has been written in the FIXED_DEFMSTR field.This results in not having one clock cycle latency when the fixed master tries to access the slave again."] FIXED, } impl From<DEFMSTR_TYPE_A> for u8 { #[inline(always)] fn from(variant: DEFMSTR_TYPE_A) -> Self { match variant { DEFMSTR_TYPE_A::NONE => 0, DEFMSTR_TYPE_A::LAST => 1, DEFMSTR_TYPE_A::FIXED => 2, } } } #[doc = "Reader of field `DEFMSTR_TYPE`"] pub type DEFMSTR_TYPE_R = crate::R<u8, DEFMSTR_TYPE_A>; impl DEFMSTR_TYPE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, DEFMSTR_TYPE_A> { use crate::Variant::*; match self.bits { 0 => Val(DEFMSTR_TYPE_A::NONE), 1 => Val(DEFMSTR_TYPE_A::LAST), 2 => Val(DEFMSTR_TYPE_A::FIXED), i => Res(i), } } #[doc = "Checks if the value of the field is `NONE`"] #[inline(always)] pub fn is_none(&self) -> bool { *self == DEFMSTR_TYPE_A::NONE } #[doc = "Checks if the value of the field is `LAST`"] #[inline(always)] pub fn is_last(&self) -> bool { *self == DEFMSTR_TYPE_A::LAST } #[doc = "Checks if the value of the field is `FIXED`"] #[inline(always)] pub fn is_fixed(&self) -> bool { *self == DEFMSTR_TYPE_A::FIXED } } #[doc = "Write proxy for field `DEFMSTR_TYPE`"] pub struct DEFMSTR_TYPE_W<'a> { w: &'a mut W, } impl<'a> DEFMSTR_TYPE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DEFMSTR_TYPE_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "No Default Master-At the end of the current slave access, if no other master request is pending, the slave is disconnected from all masters.This results in a one clock cycle latency for the first access of a burst transfer or for a single access."] #[inline(always)] pub fn none(self) -> &'a mut W { self.variant(DEFMSTR_TYPE_A::NONE) } #[doc = "Last Default Master-At the end of the current slave access, if no other master request is pending, the slave stays connected to the last 
master having accessed it.This results in not having one clock cycle latency when the last master tries to access the slave again."] #[inline(always)] pub fn last(self) -> &'a mut W { self.variant(DEFMSTR_TYPE_A::LAST) } #[doc = "Fixed Default Master-At the end of the current slave access, if no other master request is pending, the slave connects to the fixed master the number that has been written in the FIXED_DEFMSTR field.This results in not having one clock cycle latency when the fixed master tries to access the slave again."] #[inline(always)] pub fn fixed(self) -> &'a mut W { self.variant(DEFMSTR_TYPE_A::FIXED) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 16)) | (((value as u32) & 0x03) << 16); self.w } } #[doc = "Reader of field `FIXED_DEFMSTR`"] pub type FIXED_DEFMSTR_R = crate::R<u8, u8>; #[doc = "Write proxy for field `FIXED_DEFMSTR`"] pub struct FIXED_DEFMSTR_W<'a> { w: &'a mut W, } impl<'a> FIXED_DEFMSTR_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x0f << 18)) | (((value as u32) & 0x0f) << 18); self.w } } impl R { #[doc = "Bits 0:8 - Maximum Bus Grant Duration for Masters"] #[inline(always)] pub fn slot_cycle(&self) -> SLOT_CYCLE_R { SLOT_CYCLE_R::new((self.bits & 0x01ff) as u16) } #[doc = "Bits 16:17 - Default Master Type"] #[inline(always)] pub fn defmstr_type(&self) -> DEFMSTR_TYPE_R { DEFMSTR_TYPE_R::new(((self.bits >> 16) & 0x03) as u8) } #[doc = "Bits 18:21 - Fixed Default Master"] #[inline(always)] pub fn fixed_defmstr(&self) -> FIXED_DEFMSTR_R { FIXED_DEFMSTR_R::new(((self.bits >> 18) & 0x0f) as u8) } } impl W { #[doc = "Bits 0:8 - Maximum Bus Grant Duration for Masters"] #[inline(always)] pub fn slot_cycle(&mut self) -> SLOT_CYCLE_W { SLOT_CYCLE_W { w: self } } #[doc = "Bits 16:17 - Default Master Type"] #[inline(always)] pub fn
(&mut self) -> DEFMSTR_TYPE_W { DEFMSTR_TYPE_W { w: self } } #[doc = "Bits 18:21 - Fixed Default Master"] #[inline(always)] pub fn fixed_defmstr(&mut self) -> FIXED_DEFMSTR_W { FIXED_DEFMSTR_W { w: self } } }
defmstr_type
sparsegraph6.py
""" ************** SparseGraph 6 ************** Read graphs in graph6 and sparse6 format. Format ------ "graph6 and sparse6 are formats for storing undirected graphs in a compact manner, using only printable ASCII characters. Files in these formats have text type and contain one line per graph." http://cs.anu.edu.au/~bdm/data/formats.html See http://cs.anu.edu.au/~bdm/data/formats.txt for details. """ # Original author: D. Eppstein, UC Irvine, August 12, 2003. # The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain. __author__ = """Aric Hagberg ([email protected])""" # Copyright (C) 2004-2010 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> # All rights reserved.
__all__ = ['read_graph6', 'parse_graph6', 'read_graph6_list', 'read_sparse6', 'parse_sparse6', 'read_sparse6_list'] import networkx as nx from networkx.exception import NetworkXError from networkx.utils import open_file # graph6 def read_graph6(path): """Read simple undirected graphs in graph6 format from path. Returns a single Graph. """ return read_graph6_list(path)[0] def parse_graph6(str): """Read a simple undirected graph in graph6 format from string. Returns a single Graph. """ def bits(): """Return sequence of individual bits from 6-bit-per-value list of data values.""" for d in data: for i in [5,4,3,2,1,0]: yield (d>>i)&1 if str.startswith('>>graph6<<'): str = str[10:] data = graph6data(str) n, data = graph6n(data) nd = (n*(n-1)//2 + 5) // 6 if len(data) != nd: raise NetworkXError(\ 'Expected %d bits but got %d in graph6' % (n*(n-1)//2, len(data)*6)) G=nx.Graph() G.add_nodes_from(range(n)) for (i,j),b in zip([(i,j) for j in range(1,n) for i in range(j)], bits()): if b: G.add_edge(i,j) return G @open_file(0,mode='rt') def read_graph6_list(path): """Read simple undirected graphs in graph6 format from path. Returns a list of Graphs, one for each line in file. """ glist=[] for line in path: line = line.strip() if not len(line): continue glist.append(parse_graph6(line)) return glist # sparse6 def read_sparse6(path): """Read simple undirected graphs in sparse6 format from path. Returns a single MultiGraph.""" return read_sparse6_list(path)[0] @open_file(0,mode='rt') def read_sparse6_list(path): """Read undirected graphs in sparse6 format from path. Returns a list of MultiGraphs, one for each line in file.""" glist=[] for line in path: line = line.strip() if not len(line): continue glist.append(parse_sparse6(line)) return glist def parse_sparse6(string): """Read undirected graph in sparse6 format from string. Returns a MultiGraph. """ if string.startswith('>>sparse6<<'): string = str[10:] if not string.startswith(':'): raise NetworkXError('Expected colon in sparse6') n, data = graph6n(graph6data(string[1:])) k = 1 while 1<<k < n: k += 1 def parseData(): """Return stream of pairs b[i], x[i] for sparse6 format.""" chunks = iter(data) d = None # partial data word dLen = 0 # how many unparsed bits are left in d while 1: if dLen < 1: d = next(chunks) dLen = 6 dLen -= 1 b = (d>>dLen) & 1 # grab top remaining bit x = d & ((1<<dLen)-1) # partially built up value of x xLen = dLen # how many bits included so far in x while xLen < k: # now grab full chunks until we have enough d = next(chunks) dLen = 6 x = (x<<6) + d xLen += 6 x = (x >> (xLen - k)) # shift back the extra bits dLen = xLen - k yield b,x v = 0 G=nx.MultiGraph() G.add_nodes_from(range(n)) for b,x in parseData(): if b: v += 1 if x >= n: break # padding with ones can cause overlarge number here elif x > v: v = x else: G.add_edge(x,v) return G # helper functions def graph6data(str): """Convert graph6 character sequence to 6-bit integers.""" v = [ord(c)-63 for c in str] if min(v) < 0 or max(v) > 63: return None return v def graph6n(data): """Read initial one or four-unit value from graph6 sequence. Return value, rest of seq.""" if data[0] <= 62: return data[0], data[1:] return (data[1]<<12) + (data[2]<<6) + data[3], data[4:]
# BSD license.
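A quick sanity check for parse_graph6, assuming the module above is on the import path. The string 'Bw' is a hand-computed graph6 encoding of the triangle K3 (N(3) = chr(63 + 3) = 'B'; the upper-triangle bits 111 padded to 111000 give chr(63 + 56) = 'w'); it is not a value taken from the module's own docs.

# parse_graph6 comes from the module above; only the tiny input is hand-made.
G = parse_graph6('Bw')
print(sorted(G.nodes()))  # [0, 1, 2]
print(sorted(G.edges()))  # [(0, 1), (0, 2), (1, 2)] -- the complete graph K3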
config.py
# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. # Please remember to run "make -C docs html" after update "desc" attributes. import argparse import copy import grp import inspect import os import pwd import re import shlex import ssl import sys import textwrap from gunicorn import __version__, util from gunicorn.errors import ConfigError from gunicorn.reloader import reloader_engines KNOWN_SETTINGS = [] PLATFORM = sys.platform def make_settings(ignore=None): settings = {} ignore = ignore or () for s in KNOWN_SETTINGS: setting = s() if setting.name in ignore: continue settings[setting.name] = setting.copy() return settings def auto_int(_, x): # for compatible with octal numbers in python3 if re.match(r'0(\d)', x, re.IGNORECASE): x = x.replace('0', '0o', 1) return int(x, 0) class Config(object): def __init__(self, usage=None, prog=None): self.settings = make_settings() self.usage = usage self.prog = prog or os.path.basename(sys.argv[0]) self.env_orig = os.environ.copy() def __str__(self): lines = [] kmax = max(len(k) for k in self.settings) for k in sorted(self.settings): v = self.settings[k].value if callable(v): v = "<{}()>".format(v.__qualname__) lines.append("{k:{kmax}} = {v}".format(k=k, v=v, kmax=kmax)) return "\n".join(lines) def __getattr__(self, name): if name not in self.settings: raise AttributeError("No configuration setting for: %s" % name) return self.settings[name].get() def __setattr__(self, name, value): if name != "settings" and name in self.settings: raise AttributeError("Invalid access!") super().__setattr__(name, value) def set(self, name, value): if name not in self.settings: raise AttributeError("No configuration setting for: %s" % name) self.settings[name].set(value) def get_cmd_args_from_env(self): if 'GUNICORN_CMD_ARGS' in self.env_orig: return shlex.split(self.env_orig['GUNICORN_CMD_ARGS']) return [] def parser(self): kwargs = { "usage": self.usage, "prog": self.prog } parser = argparse.ArgumentParser(**kwargs) parser.add_argument("-v", "--version", action="version", default=argparse.SUPPRESS, version="%(prog)s (version " + __version__ + ")\n", help="show program's version number and exit") parser.add_argument("args", nargs="*", help=argparse.SUPPRESS) keys = sorted(self.settings, key=self.settings.__getitem__) for k in keys: self.settings[k].add_option(parser) return parser @property def worker_class_str(self): uri = self.settings['worker_class'].get() # are we using a threaded worker? is_sync = uri.endswith('SyncWorker') or uri == 'sync' if is_sync and self.threads > 1: return "gthread" return uri @property def worker_class(self): uri = self.settings['worker_class'].get() # are we using a threaded worker? 
is_sync = uri.endswith('SyncWorker') or uri == 'sync' if is_sync and self.threads > 1: uri = "gunicorn.workers.gthread.ThreadWorker" worker_class = util.load_class(uri) if hasattr(worker_class, "setup"): worker_class.setup() return worker_class @property def address(self): s = self.settings['bind'].get() return [util.parse_address(util.bytes_to_str(bind)) for bind in s] @property def uid(self): return self.settings['user'].get() @property def gid(self): return self.settings['group'].get() @property def proc_name(self): pn = self.settings['proc_name'].get() if pn is not None: return pn else: return self.settings['default_proc_name'].get() @property def logger_class(self): uri = self.settings['logger_class'].get() if uri == "simple": # support the default uri = LoggerClass.default # if default logger is in use, and statsd is on, automagically switch # to the statsd logger if uri == LoggerClass.default: if 'statsd_host' in self.settings and self.settings['statsd_host'].value is not None: uri = "gunicorn.instrument.statsd.Statsd" logger_class = util.load_class( uri, default="gunicorn.glogging.Logger", section="gunicorn.loggers") if hasattr(logger_class, "install"): logger_class.install() return logger_class @property def is_ssl(self): return self.certfile or self.keyfile @property def ssl_options(self): opts = {} for name, value in self.settings.items(): if value.section == 'SSL': opts[name] = value.get() return opts @property def env(self): raw_env = self.settings['raw_env'].get() env = {} if not raw_env: return env for e in raw_env: s = util.bytes_to_str(e) try: k, v = s.split('=', 1) except ValueError: raise RuntimeError("environment setting %r invalid" % s) env[k] = v return env @property def sendfile(self): if self.settings['sendfile'].get() is not None: return False if 'SENDFILE' in os.environ: sendfile = os.environ['SENDFILE'].lower() return sendfile in ['y', '1', 'yes', 'true'] return True @property def reuse_port(self): return self.settings['reuse_port'].get() @property def paste_global_conf(self): raw_global_conf = self.settings['raw_paste_global_conf'].get() if raw_global_conf is None: return None global_conf = {} for e in raw_global_conf: s = util.bytes_to_str(e) try: k, v = re.split(r'(?<!\\)=', s, 1) except ValueError: raise RuntimeError("environment setting %r invalid" % s) k = k.replace('\\=', '=') v = v.replace('\\=', '=') global_conf[k] = v return global_conf class SettingMeta(type): def __new__(cls, name, bases, attrs): super_new = super().__new__ parents = [b for b in bases if isinstance(b, SettingMeta)] if not parents: return super_new(cls, name, bases, attrs) attrs["order"] = len(KNOWN_SETTINGS) attrs["validator"] = staticmethod(attrs["validator"]) new_class = super_new(cls, name, bases, attrs) new_class.fmt_desc(attrs.get("desc", "")) KNOWN_SETTINGS.append(new_class) return new_class def fmt_desc(cls, desc): desc = textwrap.dedent(desc).strip() setattr(cls, "desc", desc) setattr(cls, "short", desc.splitlines()[0]) class Setting(object): name = None value = None section = None cli = None validator = None type = None meta = None action = None default = None short = None desc = None nargs = None const = None def __init__(self): if self.default is not None: self.set(self.default) def add_option(self, parser): if not self.cli: return args = tuple(self.cli) help_txt = "%s [%s]" % (self.short, self.default) help_txt = help_txt.replace("%", "%%") kwargs = { "dest": self.name, "action": self.action or "store", "type": self.type or str, "default": None, "help": help_txt } if 
self.meta is not None: kwargs['metavar'] = self.meta if kwargs["action"] != "store": kwargs.pop("type") if self.nargs is not None: kwargs["nargs"] = self.nargs if self.const is not None: kwargs["const"] = self.const parser.add_argument(*args, **kwargs) def copy(self): return copy.copy(self) def get(self): return self.value def set(self, val): if not callable(self.validator): raise TypeError('Invalid validator: %s' % self.name) self.value = self.validator(val) def __lt__(self, other): return (self.section == other.section and self.order < other.order) __cmp__ = __lt__ def __repr__(self): return "<%s.%s object at %x with value %r>" % ( self.__class__.__module__, self.__class__.__name__, id(self), self.value, ) Setting = SettingMeta('Setting', (Setting,), {}) def validate_bool(val): if val is None: return if isinstance(val, bool): return val if not isinstance(val, str): raise TypeError("Invalid type for casting: %s" % val) if val.lower().strip() == "true": return True elif val.lower().strip() == "false": return False else: raise ValueError("Invalid boolean: %s" % val) def validate_dict(val): if not isinstance(val, dict): raise TypeError("Value is not a dictionary: %s " % val) return val def validate_pos_int(val): if not isinstance(val, int): val = int(val, 0) else: # Booleans are ints! val = int(val) if val < 0: raise ValueError("Value must be positive: %s" % val) return val def validate_ssl_version(val): ssl_versions = {} for protocol in [p for p in dir(ssl) if p.startswith("PROTOCOL_")]: ssl_versions[protocol[9:]] = getattr(ssl, protocol) if val in ssl_versions: # string matching PROTOCOL_... return ssl_versions[val] try: intval = validate_pos_int(val) if intval in ssl_versions.values(): # positive int matching a protocol int constant return intval except (ValueError, TypeError): # negative integer or not an integer # drop this in favour of the more descriptive ValueError below pass raise ValueError("Invalid ssl_version: %s. Valid options: %s" % (val, ', '.join(ssl_versions))) def validate_string(val): if val is None: return None if not isinstance(val, str): raise TypeError("Not a string: %s" % val) return val.strip() def validate_file_exists(val): if val is None: return None if not os.path.exists(val): raise ValueError("File %s does not exists." % val) return val def validate_list_string(val): if not val: return [] # legacy syntax if isinstance(val, str): val = [val] return [validate_string(v) for v in val] def validate_list_of_existing_files(val): return [validate_file_exists(v) for v in validate_list_string(val)] def validate_string_to_list(val): val = validate_string(val) if not val: return [] return [v.strip() for v in val.split(",") if v] def validate_class(val): if inspect.isfunction(val) or inspect.ismethod(val): val = val() if inspect.isclass(val): return val return validate_string(val) def validate_callable(arity): def _validate_callable(val): if isinstance(val, str): try: mod_name, obj_name = val.rsplit(".", 1) except ValueError: raise TypeError("Value '%s' is not import string. 
" "Format: module[.submodules...].object" % val) try: mod = __import__(mod_name, fromlist=[obj_name]) val = getattr(mod, obj_name) except ImportError as e: raise TypeError(str(e)) except AttributeError: raise TypeError("Can not load '%s' from '%s'" "" % (obj_name, mod_name)) if not callable(val): raise TypeError("Value is not callable: %s" % val) if arity != -1 and arity != util.get_arity(val): raise TypeError("Value must have an arity of: %s" % arity) return val return _validate_callable def validate_user(val): if val is None: return os.geteuid() if isinstance(val, int): return val elif val.isdigit(): return int(val) else: try: return pwd.getpwnam(val).pw_uid except KeyError: raise ConfigError("No such user: '%s'" % val) def validate_group(val): if val is None: return os.getegid() if isinstance(val, int): return val elif val.isdigit(): return int(val) else: try: return grp.getgrnam(val).gr_gid except KeyError: raise ConfigError("No such group: '%s'" % val) def validate_post_request(val): val = validate_callable(-1)(val) largs = util.get_arity(val) if largs == 4: return val elif largs == 3: return lambda worker, req, env, _r: val(worker, req, env) elif largs == 2: return lambda worker, req, _e, _r: val(worker, req) else: raise TypeError("Value must have an arity of: 4") def validate_chdir(val): # valid if the value is a string val = validate_string(val) # transform relative paths path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val))) # test if the path exists if not os.path.exists(path): raise ConfigError("can't chdir to %r" % val) return path def validate_hostport(val): val = validate_string(val) if val is None: return None elements = val.split(":") if len(elements) == 2: return (elements[0], int(elements[1])) else: raise TypeError("Value must consist of: hostname:port") def validate_reload_engine(val): if val not in reloader_engines: raise ConfigError("Invalid reload_engine: %r" % val) return val def get_default_config_file(): config_path = os.path.join(os.path.abspath(os.getcwd()), 'gunicorn.conf.py') if os.path.exists(config_path): return config_path return None class ConfigFile(Setting): name = "config" section = "Config File" cli = ["-c", "--config"] meta = "CONFIG" validator = validate_string default = "./gunicorn.conf.py" desc = """\ The Gunicorn config file. A string of the form ``PATH``, ``file:PATH``, or ``python:MODULE_NAME``. Only has an effect when specified on the command line or as part of an application specific configuration. By default, a file named ``gunicorn.conf.py`` will be read from the same directory where gunicorn is being run. .. versionchanged:: 19.4 Loading the config from a Python module requires the ``python:`` prefix. """ class WSGIApp(Setting): name = "wsgi_app" section = "Config File" meta = "STRING" validator = validate_string default = None desc = """\ A WSGI application path in pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``. .. versionadded:: 20.1.0 """ class Bind(Setting): name = "bind" action = "append" section = "Server Socket" cli = ["-b", "--bind"] meta = "ADDRESS" validator = validate_list_string if 'PORT' in os.environ: default = ['0.0.0.0:{0}'.format(os.environ.get('PORT'))] else: default = ['127.0.0.1:8000'] desc = """\ The socket to bind. A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``, ``fd://FD``. An IP is a valid ``HOST``. .. versionchanged:: 20.0 Support for ``fd://FD`` got added. Multiple addresses can be bound. 
ex.:: $ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app will bind the `test:app` application on localhost both on ipv6 and ipv4 interfaces. If the ``PORT`` environment variable is defined, the default is ``['0.0.0.0:$PORT']``. If it is not defined, the default is ``['127.0.0.1:8000']``. """ class Backlog(Setting): name = "backlog" section = "Server Socket" cli = ["--backlog"] meta = "INT" validator = validate_pos_int type = int default = 2048 desc = """\ The maximum number of pending connections. This refers to the number of clients that can be waiting to be served. Exceeding this number results in the client getting an error when attempting to connect. It should only affect servers under significant load. Must be a positive integer. Generally set in the 64-2048 range. """ class Workers(Setting): name = "workers" section = "Worker Processes" cli = ["-w", "--workers"] meta = "INT" validator = validate_pos_int type = int default = int(os.environ.get("WEB_CONCURRENCY", 1)) desc = """\ The number of worker processes for handling requests. A positive integer generally in the ``2-4 x $(NUM_CORES)`` range. You'll want to vary this a bit to find the best for your particular application's work load. By default, the value of the ``WEB_CONCURRENCY`` environment variable, which is set by some Platform-as-a-Service providers such as Heroku. If it is not defined, the default is ``1``. """ class WorkerClass(Setting): name = "worker_class" section = "Worker Processes" cli = ["-k", "--worker-class"] meta = "STRING" validator = validate_class default = "sync" desc = """\ The type of workers to use. The default class (``sync``) should handle most "normal" types of workloads. You'll want to read :doc:`design` for information on when you might want to choose one of the other worker classes. Required libraries may be installed using setuptools' ``extras_require`` feature. A string referring to one of the following bundled classes: * ``sync`` * ``eventlet`` - Requires eventlet >= 0.24.1 (or install it via ``pip install gunicorn[eventlet]``) * ``gevent`` - Requires gevent >= 1.4 (or install it via ``pip install gunicorn[gevent]``) * ``tornado`` - Requires tornado >= 0.2 (or install it via ``pip install gunicorn[tornado]``) * ``gthread`` - Python 2 requires the futures package to be installed (or install it via ``pip install gunicorn[gthread]``) Optionally, you can provide your own worker by giving Gunicorn a Python path to a subclass of ``gunicorn.workers.base.Worker``. This alternative syntax will load the gevent class: ``gunicorn.workers.ggevent.GeventWorker``. """ class WorkerThreads(Setting): name = "threads" section = "Worker Processes" cli = ["--threads"] meta = "INT" validator = validate_pos_int type = int default = 1 desc = """\ The number of worker threads for handling requests. Run each worker with the specified number of threads. A positive integer generally in the ``2-4 x $(NUM_CORES)`` range. You'll want to vary this a bit to find the best for your particular application's work load. If it is not defined, the default is ``1``. This setting only affects the Gthread worker type. .. note:: If you try to use the ``sync`` worker type and set the ``threads`` setting to more than 1, the ``gthread`` worker type will be used instead. """ class WorkerConnections(Setting): name = "worker_connections" section = "Worker Processes" cli = ["--worker-connections"] meta = "INT" validator = validate_pos_int type = int default = 1000 desc = """\ The maximum number of simultaneous clients. 
This setting only affects the Eventlet and Gevent worker types. """ class MaxRequests(Setting): name = "max_requests" section = "Worker Processes" cli = ["--max-requests"] meta = "INT" validator = validate_pos_int type = int default = 0 desc = """\ The maximum number of requests a worker will process before restarting. Any value greater than zero will limit the number of requests a worker will process before automatically restarting. This is a simple method to help limit the damage of memory leaks. If this is set to zero (the default) then the automatic worker restarts are disabled. """ class MaxRequestsJitter(Setting): name = "max_requests_jitter" section = "Worker Processes" cli = ["--max-requests-jitter"] meta = "INT" validator = validate_pos_int type = int default = 0 desc = """\ The maximum jitter to add to the *max_requests* setting. The jitter causes the restart per worker to be randomized by ``randint(0, max_requests_jitter)``. This is intended to stagger worker restarts to avoid all workers restarting at the same time. .. versionadded:: 19.2 """ class WaitForNewWorkers(Setting): name = "wait_for_new_workers" section = "Worker Processes" cli = ["--wait-for-new-workers"] validator = validate_bool action = 'store_true' default = False desc = """\ Wait for a new worker to become ready before killing an old worker. """ class MaxRestartingWorkers(Setting): name = "max_restarting_workers" section = "Worker Processes" cli = ["--max-restarting-workers"] meta = "INT" validator = validate_pos_int type = int default = 0 desc = """\ The maximum number of workers which can be restarted at the same time. """ class WarmupRequests(Setting): name = "warmup_requests" section = "Worker Processes" cli = ["--warmup-requests"] meta = "INT" validator = validate_pos_int type = int default = 0 desc = """\ The number of requests a new worker needs to handle until the old worker can be killed. """ class Timeout(Setting): name = "timeout" section = "Worker Processes" cli = ["-t", "--timeout"] meta = "INT" validator = validate_pos_int type = int default = 30 desc = """\ Workers silent for more than this many seconds are killed and restarted. Value is a positive number or 0. Setting it to 0 has the effect of infinite timeouts by disabling timeouts for all workers entirely. Generally, the default of thirty seconds should suffice. Only set this noticeably higher if you're sure of the repercussions for sync workers. For the non sync workers it just means that the worker process is still communicating and is not tied to the length of time required to handle a single request. """ class GracefulTimeout(Setting): name = "graceful_timeout" section = "Worker Processes" cli = ["--graceful-timeout"] meta = "INT" validator = validate_pos_int type = int default = 30 desc = """\ Timeout for graceful workers restart. After receiving a restart signal, workers have this much time to finish serving requests. Workers still alive after the timeout (starting from the receipt of the restart signal) are force killed. """ class Keepalive(Setting): name = "keepalive" section = "Worker Processes" cli = ["--keep-alive"] meta = "INT" validator = validate_pos_int type = int default = 2 desc = """\ The number of seconds to wait for requests on a Keep-Alive connection. Generally set in the 1-5 seconds range for servers with direct connection to the client (e.g. when you don't have separate load balancer). When Gunicorn is deployed behind a load balancer, it often makes sense to set this to a higher value. .. 
note:: ``sync`` worker does not support persistent connections and will ignore this option. """ class LimitRequestLine(Setting): name = "limit_request_line" section = "Security" cli = ["--limit-request-line"] meta = "INT" validator = validate_pos_int type = int default = 4094 desc = """\ The maximum size of HTTP request line in bytes. This parameter is used to limit the allowed size of a client's HTTP request-line. Since the request-line consists of the HTTP method, URI, and protocol version, this directive places a restriction on the length of a request-URI allowed for a request on the server. A server needs this value to be large enough to hold any of its resource names, including any information that might be passed in the query part of a GET request. Value is a number from 0 (unlimited) to 8190. This parameter can be used to prevent any DDOS attack. """ class LimitRequestFields(Setting): name = "limit_request_fields" section = "Security" cli = ["--limit-request-fields"] meta = "INT" validator = validate_pos_int type = int default = 100 desc = """\ Limit the number of HTTP headers fields in a request. This parameter is used to limit the number of headers in a request to prevent DDOS attack. Used with the *limit_request_field_size* it allows more safety. By default this value is 100 and can't be larger than 32768. """ class LimitRequestFieldSize(Setting): name = "limit_request_field_size" section = "Security" cli = ["--limit-request-field_size"] meta = "INT" validator = validate_pos_int type = int default = 8190 desc = """\ Limit the allowed size of an HTTP request header field. Value is a positive number or 0. Setting it to 0 will allow unlimited header field sizes. .. warning:: Setting this parameter to a very high or unlimited value can open up for DDOS attacks. """ class EnrichResponse(Setting): name = "enrich_response" section = 'Debugging' cli = ['--enrich-response'] validator = validate_bool action = 'store_true' default = False desc = '''\ Add extra information in the http response body. Works only for sync worker type. While handling a request, a few timestamps are taken (in microseconds, since 1st of January, 1970): * ``spawning time`` - when worker object is initialized (this is before forking the new process) * ``time 1`` - immediately after entering "handle_request" * ``time 2`` - just before getting the response * ``time 3`` - immediately after getting the response The following information is inserted into the response body: * ``spawn``: spawning time * ``t1``: time1 * ``d1``: time2 - time1 * ``d2``: time3 - time2 * ``pid``: the pid of the worker handling the request * ``nr``: number of requests handled by this worker so far * ``max``: number of requests planned for this worker (this can be exceeded a little bit because of the rolling restarting strategy) The new response is a json with two keys: "res" contains the original response "info" contains the extra information ''' class Reload(Setting): name = "reload" section = 'Debugging' cli = ['--reload'] validator = validate_bool action = 'store_true' default = False desc = '''\ Restart workers when code changes. This setting is intended for development. It will cause workers to be restarted whenever application code changes. The reloader is incompatible with application preloading. When using a paste configuration be sure that the server block does not import any application code or the reload will not work as designed. The default behavior is to attempt inotify with a fallback to file system polling. 
Generally, inotify should be preferred if available because it consumes less system resources. .. note:: In order to use the inotify reloader, you must have the ``inotify`` package installed. ''' class ReloadEngine(Setting): name = "reload_engine" section = "Debugging" cli = ["--reload-engine"] meta = "STRING" validator = validate_reload_engine default = "auto" desc = """\ The implementation that should be used to power :ref:`reload`. Valid engines are: * ``'auto'`` * ``'poll'`` * ``'inotify'`` (requires inotify) .. versionadded:: 19.7 """ class ReloadExtraFiles(Setting): name = "reload_extra_files" action = "append" section = "Debugging" cli = ["--reload-extra-file"] meta = "FILES" validator = validate_list_of_existing_files default = [] desc = """\ Extends :ref:`reload` option to also watch and reload on additional files (e.g., templates, configurations, specifications, etc.). .. versionadded:: 19.8 """ class Spew(Setting): name = "spew" section = "Debugging" cli = ["--spew"] validator = validate_bool action = "store_true" default = False desc = """\ Install a trace function that spews every line executed by the server. This is the nuclear option. """ class ConfigCheck(Setting): name = "check_config" section = "Debugging" cli = ["--check-config"] validator = validate_bool action = "store_true" default = False desc = """\ Check the configuration and exit. The exit status is 0 if the configuration is correct, and 1 if the configuration is incorrect. """ class PrintConfig(Setting): name = "print_config" section = "Debugging" cli = ["--print-config"] validator = validate_bool action = "store_true" default = False desc = """\ Print the configuration settings as fully resolved. Implies :ref:`check-config`. """ class PreloadApp(Setting): name = "preload_app" section = "Server Mechanics" cli = ["--preload"] validator = validate_bool action = "store_true" default = False desc = """\ Load application code before the worker processes are forked. By preloading an application you can save some RAM resources as well as speed up server boot times. Although, if you defer application loading to each worker process, you can reload your application code easily by restarting workers. """ class Sendfile(Setting): name = "sendfile" section = "Server Mechanics" cli = ["--no-sendfile"] validator = validate_bool action = "store_const" const = False desc = """\ Disables the use of ``sendfile()``. If not set, the value of the ``SENDFILE`` environment variable is used to enable or disable its usage. .. versionadded:: 19.2 .. versionchanged:: 19.4 Swapped ``--sendfile`` with ``--no-sendfile`` to actually allow disabling. .. versionchanged:: 19.6 added support for the ``SENDFILE`` environment variable """ class ReusePort(Setting): name = "reuse_port" section = "Server Mechanics" cli = ["--reuse-port"] validator = validate_bool action = "store_true" default = False desc = """\ Set the ``SO_REUSEPORT`` flag on the listening socket. .. versionadded:: 19.8 """ class Chdir(Setting): name = "chdir" section = "Server Mechanics" cli = ["--chdir"] validator = validate_chdir default = util.getcwd() desc = """\ Change directory to specified directory before loading apps. """ class Daemon(Setting): name = "daemon" section = "Server Mechanics" cli = ["-D", "--daemon"] validator = validate_bool action = "store_true" default = False desc = """\ Daemonize the Gunicorn process. Detaches the server from the controlling terminal and enters the background. 
""" class Env(Setting): name = "raw_env" action = "append" section = "Server Mechanics" cli = ["-e", "--env"] meta = "ENV" validator = validate_list_string default = [] desc = """\ Set environment variables in the execution environment. Should be a list of strings in the ``key=value`` format. For example on the command line: .. code-block:: console $ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app Or in the configuration file: .. code-block:: python raw_env = ["FOO=1"] """ class Pidfile(Setting): name = "pidfile" section = "Server Mechanics" cli = ["-p", "--pid"] meta = "FILE" validator = validate_string default = None desc = """\ A filename to use for the PID file. If not set, no PID file will be written. """ class WorkerTmpDir(Setting): name = "worker_tmp_dir" section = "Server Mechanics" cli = ["--worker-tmp-dir"] meta = "DIR" validator = validate_string default = None desc = """\ A directory to use for the worker heartbeat temporary file. If not set, the default temporary directory will be used. .. note:: The current heartbeat system involves calling ``os.fchmod`` on temporary file handlers and may block a worker for arbitrary time if the directory is on a disk-backed filesystem. See :ref:`blocking-os-fchmod` for more detailed information and a solution for avoiding this problem. """ class User(Setting): name = "user" section = "Server Mechanics" cli = ["-u", "--user"] meta = "USER" validator = validate_user default = os.geteuid() desc = """\ Switch worker processes to run as this user. A valid user id (as an integer) or the name of a user that can be retrieved with a call to ``pwd.getpwnam(value)`` or ``None`` to not change the worker process user. """ class Group(Setting): name = "group" section = "Server Mechanics" cli = ["-g", "--group"] meta = "GROUP" validator = validate_group default = os.getegid() desc = """\ Switch worker process to run as this group. A valid group id (as an integer) or the name of a user that can be retrieved with a call to ``pwd.getgrnam(value)`` or ``None`` to not change the worker processes group. """ class Umask(Setting): name = "umask" section = "Server Mechanics" cli = ["-m", "--umask"] meta = "INT" validator = validate_pos_int type = auto_int default = 0 desc = """\ A bit mask for the file mode on files written by Gunicorn. Note that this affects unix socket permissions. A valid value for the ``os.umask(mode)`` call or a string compatible with ``int(value, 0)`` (``0`` means Python guesses the base, so values like ``0``, ``0xFF``, ``0022`` are valid for decimal, hex, and octal representations) """ class Initgroups(Setting): name = "initgroups" section = "Server Mechanics" cli = ["--initgroups"] validator = validate_bool action = 'store_true' default = False desc = """\ If true, set the worker process's group access list with all of the groups of which the specified username is a member, plus the specified group id. .. versionadded:: 19.7 """ class TmpUploadDir(Setting): name = "tmp_upload_dir" section = "Server Mechanics" meta = "DIR" validator = validate_string default = None desc = """\ Directory to store temporary request data as they are read. This may disappear in the near future. This path should be writable by the process permissions set for Gunicorn workers. If not specified, Gunicorn will choose a system generated temporary directory. 
""" class SecureSchemeHeader(Setting): name = "secure_scheme_headers" section = "Server Mechanics" validator = validate_dict default = { "X-FORWARDED-PROTOCOL": "ssl", "X-FORWARDED-PROTO": "https", "X-FORWARDED-SSL": "on" } desc = """\ A dictionary containing headers and values that the front-end proxy uses to indicate HTTPS requests. If the source IP is permitted by ``forwarded-allow-ips`` (below), *and* at least one request header matches a key-value pair listed in this dictionary, then Gunicorn will set ``wsgi.url_scheme`` to ``https``, so your application can tell that the request is secure. If the other headers listed in this dictionary are not present in the request, they will be ignored, but if the other headers are present and do not match the provided values, then the request will fail to parse. See the note below for more detailed examples of this behaviour. The dictionary should map upper-case header names to exact string values. The value comparisons are case-sensitive, unlike the header names, so make sure they're exactly what your front-end proxy sends when handling HTTPS requests. It is important that your front-end proxy configuration ensures that the headers defined here can not be passed directly from the client. """ class ForwardedAllowIPS(Setting): name = "forwarded_allow_ips" section = "Server Mechanics" cli = ["--forwarded-allow-ips"] meta = "STRING" validator = validate_string_to_list default = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1") desc = """\ Front-end's IPs from which allowed to handle set secure headers. (comma separate). Set to ``*`` to disable checking of Front-end IPs (useful for setups where you don't know in advance the IP address of Front-end, but you still trust the environment). By default, the value of the ``FORWARDED_ALLOW_IPS`` environment variable. If it is not defined, the default is ``"127.0.0.1"``. .. note:: The interplay between the request headers, the value of ``forwarded_allow_ips``, and the value of ``secure_scheme_headers`` is complex. Various scenarios are documented below to further elaborate. In each case, we have a request from the remote address 134.213.44.18, and the default value of ``secure_scheme_headers``: .. code:: secure_scheme_headers = { 'X-FORWARDED-PROTOCOL': 'ssl', 'X-FORWARDED-PROTO': 'https', 'X-FORWARDED-SSL': 'on' } .. list-table:: :header-rows: 1 :align: center :widths: auto * - ``forwarded-allow-ips`` - Secure Request Headers - Result - Explanation * - .. code:: ["127.0.0.1"] - .. code:: X-Forwarded-Proto: https - .. code:: wsgi.url_scheme = "http" - IP address was not allowed * - .. code:: "*" - <none> - .. code:: wsgi.url_scheme = "http" - IP address allowed, but no secure headers provided * - .. code:: "*" - .. code:: X-Forwarded-Proto: https - .. code:: wsgi.url_scheme = "https" - IP address allowed, one request header matched * - .. code:: ["134.213.44.18"] - .. code:: X-Forwarded-Ssl: on X-Forwarded-Proto: http - ``InvalidSchemeHeaders()`` raised - IP address allowed, but the two secure headers disagreed on if HTTPS was used """ class AccessLog(Setting): name = "accesslog" section = "Logging" cli = ["--access-logfile"] meta = "FILE" validator = validate_string default = None desc = """\ The Access log file to write to. ``'-'`` means log to stdout. 
""" class DisableRedirectAccessToSyslog(Setting): name = "disable_redirect_access_to_syslog" section = "Logging" cli = ["--disable-redirect-access-to-syslog"] validator = validate_bool action = 'store_true' default = False desc = """\ Disable redirect access logs to syslog. .. versionadded:: 19.8 """ class AccessLogFormat(Setting): name = "access_log_format" section = "Logging" cli = ["--access-logformat"] meta = "STRING" validator = validate_string default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' desc = """\ The access log format. =========== =========== Identifier Description =========== =========== h remote address l ``'-'`` u user name t date of the request r status line (e.g. ``GET / HTTP/1.1``) m request method U URL path without query string q query string H protocol s status B response length b response length or ``'-'`` (CLF format) f referer a user agent T request time in seconds M request time in milliseconds D request time in microseconds L request time in decimal seconds p process ID
=========== =========== Use lowercase for header and environment variable names, and put ``{...}x`` names inside ``%(...)s``. For example:: %({x-forwarded-for}i)s """ class ErrorLog(Setting): name = "errorlog" section = "Logging" cli = ["--error-logfile", "--log-file"] meta = "FILE" validator = validate_string default = '-' desc = """\ The Error log file to write to. Using ``'-'`` for FILE makes gunicorn log to stderr. .. versionchanged:: 19.2 Log to stderr by default. """ class Loglevel(Setting): name = "loglevel" section = "Logging" cli = ["--log-level"] meta = "LEVEL" validator = validate_string default = "info" desc = """\ The granularity of Error log outputs. Valid level names are: * ``'debug'`` * ``'info'`` * ``'warning'`` * ``'error'`` * ``'critical'`` """ class CaptureOutput(Setting): name = "capture_output" section = "Logging" cli = ["--capture-output"] validator = validate_bool action = 'store_true' default = False desc = """\ Redirect stdout/stderr to specified file in :ref:`errorlog`. .. versionadded:: 19.6 """ class LoggerClass(Setting): name = "logger_class" section = "Logging" cli = ["--logger-class"] meta = "STRING" validator = validate_class default = "gunicorn.glogging.Logger" desc = """\ The logger you want to use to log events in Gunicorn. The default class (``gunicorn.glogging.Logger``) handles most normal usages in logging. It provides error and access logging. You can provide your own logger by giving Gunicorn a Python path to a class that quacks like ``gunicorn.glogging.Logger``. """ class LogConfig(Setting): name = "logconfig" section = "Logging" cli = ["--log-config"] meta = "FILE" validator = validate_string default = None desc = """\ The log config file to use. Gunicorn uses the standard Python logging module's Configuration file format. """ class LogConfigDict(Setting): name = "logconfig_dict" section = "Logging" validator = validate_dict default = {} desc = """\ The log config dictionary to use, using the standard Python logging module's dictionary configuration format. This option takes precedence over the :ref:`logconfig` option, which uses the older file configuration format. Format: https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig .. versionadded:: 19.8 """ class SyslogTo(Setting): name = "syslog_addr" section = "Logging" cli = ["--log-syslog-to"] meta = "SYSLOG_ADDR" validator = validate_string if PLATFORM == "darwin": default = "unix:///var/run/syslog" elif PLATFORM in ('freebsd', 'dragonfly', ): default = "unix:///var/run/log" elif PLATFORM == "openbsd": default = "unix:///dev/log" else: default = "udp://localhost:514" desc = """\ Address to send syslog messages. Address is a string of the form: * ``unix://PATH#TYPE`` : for unix domain socket. ``TYPE`` can be ``stream`` for the stream driver or ``dgram`` for the dgram driver. ``stream`` is the default. * ``udp://HOST:PORT`` : for UDP sockets * ``tcp://HOST:PORT`` : for TCP sockets """ class Syslog(Setting): name = "syslog" section = "Logging" cli = ["--log-syslog"] validator = validate_bool action = 'store_true' default = False desc = """\ Send *Gunicorn* logs to syslog. .. versionchanged:: 19.8 You can now disable sending access logs by using the :ref:`disable-redirect-access-to-syslog` setting. """ class SyslogPrefix(Setting): name = "syslog_prefix" section = "Logging" cli = ["--log-syslog-prefix"] meta = "SYSLOG_PREFIX" validator = validate_string default = None desc = """\ Makes Gunicorn use the parameter as program-name in the syslog entries. 
All entries will be prefixed by ``gunicorn.<prefix>``. By default the program name is the name of the process. """ class SyslogFacility(Setting): name = "syslog_facility" section = "Logging" cli = ["--log-syslog-facility"] meta = "SYSLOG_FACILITY" validator = validate_string default = "user" desc = """\ Syslog facility name """ class EnableStdioInheritance(Setting): name = "enable_stdio_inheritance" section = "Logging" cli = ["-R", "--enable-stdio-inheritance"] validator = validate_bool default = False action = "store_true" desc = """\ Enable stdio inheritance. Enable inheritance for stdio file descriptors in daemon mode. Note: To disable the Python stdout buffering, you can to set the user environment variable ``PYTHONUNBUFFERED`` . """ # statsD monitoring class StatsdHost(Setting): name = "statsd_host" section = "Logging" cli = ["--statsd-host"] meta = "STATSD_ADDR" default = None validator = validate_hostport desc = """\ ``host:port`` of the statsd server to log to. .. versionadded:: 19.1 """ # Datadog Statsd (dogstatsd) tags. https://docs.datadoghq.com/developers/dogstatsd/ class DogstatsdTags(Setting): name = "dogstatsd_tags" section = "Logging" cli = ["--dogstatsd-tags"] meta = "DOGSTATSD_TAGS" default = "" validator = validate_string desc = """\ A comma-delimited list of datadog statsd (dogstatsd) tags to append to statsd metrics. .. versionadded:: 20 """ class StatsdPrefix(Setting): name = "statsd_prefix" section = "Logging" cli = ["--statsd-prefix"] meta = "STATSD_PREFIX" default = "" validator = validate_string desc = """\ Prefix to use when emitting statsd metrics (a trailing ``.`` is added, if not provided). .. versionadded:: 19.2 """ class Procname(Setting): name = "proc_name" section = "Process Naming" cli = ["-n", "--name"] meta = "STRING" validator = validate_string default = None desc = """\ A base to use with setproctitle for process naming. This affects things like ``ps`` and ``top``. If you're going to be running more than one instance of Gunicorn you'll probably want to set a name to tell them apart. This requires that you install the setproctitle module. If not set, the *default_proc_name* setting will be used. """ class DefaultProcName(Setting): name = "default_proc_name" section = "Process Naming" validator = validate_string default = "gunicorn" desc = """\ Internal setting that is adjusted for each type of application. """ class PythonPath(Setting): name = "pythonpath" section = "Server Mechanics" cli = ["--pythonpath"] meta = "STRING" validator = validate_string default = None desc = """\ A comma-separated list of directories to add to the Python path. e.g. ``'/home/djangoprojects/myproject,/home/python/mylibrary'``. """ class Paste(Setting): name = "paste" section = "Server Mechanics" cli = ["--paste", "--paster"] meta = "STRING" validator = validate_string default = None desc = """\ Load a PasteDeploy config file. The argument may contain a ``#`` symbol followed by the name of an app section from the config file, e.g. ``production.ini#admin``. At this time, using alternate server blocks is not supported. Use the command line arguments to control server configuration instead. """ class OnStarting(Setting): name = "on_starting" section = "Server Hooks" validator = validate_callable(1) type = callable def on_starting(server): pass default = staticmethod(on_starting) desc = """\ Called just before the master process is initialized. The callable needs to accept a single instance variable for the Arbiter. 
""" class OnReload(Setting): name = "on_reload" section = "Server Hooks" validator = validate_callable(1) type = callable def on_reload(server): pass default = staticmethod(on_reload) desc = """\ Called to recycle workers during a reload via SIGHUP. The callable needs to accept a single instance variable for the Arbiter. """ class WhenReady(Setting): name = "when_ready" section = "Server Hooks" validator = validate_callable(1) type = callable def when_ready(server): pass default = staticmethod(when_ready) desc = """\ Called just after the server is started. The callable needs to accept a single instance variable for the Arbiter. """ class Prefork(Setting): name = "pre_fork" section = "Server Hooks" validator = validate_callable(2) type = callable def pre_fork(server, worker): pass default = staticmethod(pre_fork) desc = """\ Called just before a worker is forked. The callable needs to accept two instance variables for the Arbiter and new Worker. """ class Postfork(Setting): name = "post_fork" section = "Server Hooks" validator = validate_callable(2) type = callable def post_fork(server, worker): pass default = staticmethod(post_fork) desc = """\ Called just after a worker has been forked. The callable needs to accept two instance variables for the Arbiter and new Worker. """ class PostWorkerInit(Setting): name = "post_worker_init" section = "Server Hooks" validator = validate_callable(1) type = callable def post_worker_init(worker): pass default = staticmethod(post_worker_init) desc = """\ Called just after a worker has initialized the application. The callable needs to accept one instance variable for the initialized Worker. """ class WorkerInt(Setting): name = "worker_int" section = "Server Hooks" validator = validate_callable(1) type = callable def worker_int(worker): pass default = staticmethod(worker_int) desc = """\ Called just after a worker exited on SIGINT or SIGQUIT. The callable needs to accept one instance variable for the initialized Worker. """ class WorkerAbort(Setting): name = "worker_abort" section = "Server Hooks" validator = validate_callable(1) type = callable def worker_abort(worker): pass default = staticmethod(worker_abort) desc = """\ Called when a worker received the SIGABRT signal. This call generally happens on timeout. The callable needs to accept one instance variable for the initialized Worker. """ class PreExec(Setting): name = "pre_exec" section = "Server Hooks" validator = validate_callable(1) type = callable def pre_exec(server): pass default = staticmethod(pre_exec) desc = """\ Called just before a new master process is forked. The callable needs to accept a single instance variable for the Arbiter. """ class PreRequest(Setting): name = "pre_request" section = "Server Hooks" validator = validate_callable(2) type = callable def pre_request(worker, req): worker.log.debug("%s %s" % (req.method, req.path)) default = staticmethod(pre_request) desc = """\ Called just before a worker processes the request. The callable needs to accept two instance variables for the Worker and the Request. """ class PostRequest(Setting): name = "post_request" section = "Server Hooks" validator = validate_post_request type = callable def post_request(worker, req, environ, resp): pass default = staticmethod(post_request) desc = """\ Called after a worker processes the request. The callable needs to accept two instance variables for the Worker and the Request. 
""" class ChildExit(Setting): name = "child_exit" section = "Server Hooks" validator = validate_callable(2) type = callable def child_exit(server, worker): pass default = staticmethod(child_exit) desc = """\ Called just after a worker has been exited, in the master process. The callable needs to accept two instance variables for the Arbiter and the just-exited Worker. .. versionadded:: 19.7 """ class WorkerExit(Setting): name = "worker_exit" section = "Server Hooks" validator = validate_callable(2) type = callable def worker_exit(server, worker): pass default = staticmethod(worker_exit) desc = """\ Called just after a worker has been exited, in the worker process. The callable needs to accept two instance variables for the Arbiter and the just-exited Worker. """ class NumWorkersChanged(Setting): name = "nworkers_changed" section = "Server Hooks" validator = validate_callable(3) type = callable def nworkers_changed(server, new_value, old_value): pass default = staticmethod(nworkers_changed) desc = """\ Called just after *num_workers* has been changed. The callable needs to accept an instance variable of the Arbiter and two integers of number of workers after and before change. If the number of workers is set for the first time, *old_value* would be ``None``. """ class OnExit(Setting): name = "on_exit" section = "Server Hooks" validator = validate_callable(1) def on_exit(server): pass default = staticmethod(on_exit) desc = """\ Called just before exiting Gunicorn. The callable needs to accept a single instance variable for the Arbiter. """ class ProxyProtocol(Setting): name = "proxy_protocol" section = "Server Mechanics" cli = ["--proxy-protocol"] validator = validate_bool default = False action = "store_true" desc = """\ Enable detect PROXY protocol (PROXY mode). Allow using HTTP and Proxy together. It may be useful for work with stunnel as HTTPS frontend and Gunicorn as HTTP server. PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt Example for stunnel config:: [https] protocol = proxy accept = 443 connect = 80 cert = /etc/ssl/certs/stunnel.pem key = /etc/ssl/certs/stunnel.key """ class ProxyAllowFrom(Setting): name = "proxy_allow_ips" section = "Server Mechanics" cli = ["--proxy-allow-from"] validator = validate_string_to_list default = "127.0.0.1" desc = """\ Front-end's IPs from which allowed accept proxy requests (comma separate). Set to ``*`` to disable checking of Front-end IPs (useful for setups where you don't know in advance the IP address of Front-end, but you still trust the environment) """ class KeyFile(Setting): name = "keyfile" section = "SSL" cli = ["--keyfile"] meta = "FILE" validator = validate_string default = None desc = """\ SSL key file """ class CertFile(Setting): name = "certfile" section = "SSL" cli = ["--certfile"] meta = "FILE" validator = validate_string default = None desc = """\ SSL certificate file """ class SSLVersion(Setting): name = "ssl_version" section = "SSL" cli = ["--ssl-version"] validator = validate_ssl_version if hasattr(ssl, "PROTOCOL_TLS"): default = ssl.PROTOCOL_TLS else: default = ssl.PROTOCOL_SSLv23 desc = """\ SSL version to use (see stdlib ssl module's) .. versionchanged:: 20.0.1 The default value has been changed from ``ssl.PROTOCOL_SSLv23`` to ``ssl.PROTOCOL_TLS`` when Python >= 3.6 . """ default = ssl.PROTOCOL_SSLv23 desc = """\ SSL version to use. ============= ============ --ssl-version Description ============= ============ SSLv3 SSLv3 is not-secure and is strongly discouraged. SSLv23 Alias for TLS. 
Deprecated in Python 3.6, use TLS. TLS Negotiate highest possible version between client/server. Can yield SSL. (Python 3.6+) TLSv1 TLS 1.0 TLSv1_1 TLS 1.1 (Python 3.4+) TLSv1_2 TLS 1.2 (Python 3.4+) TLS_SERVER Auto-negotiate the highest protocol version like TLS, but only support server-side SSLSocket connections. (Python 3.6+) ============= ============ .. versionchanged:: 19.7 The default value has been changed from ``ssl.PROTOCOL_TLSv1`` to ``ssl.PROTOCOL_SSLv23``. .. versionchanged:: 20.0 This setting now accepts string names based on ``ssl.PROTOCOL_`` constants. """ class CertReqs(Setting): name = "cert_reqs" section = "SSL" cli = ["--cert-reqs"] validator = validate_pos_int default = ssl.CERT_NONE desc = """\ Whether client certificate is required (see stdlib ssl module's) """ class CACerts(Setting): name = "ca_certs" section = "SSL" cli = ["--ca-certs"] meta = "FILE" validator = validate_string default = None desc = """\ CA certificates file """ class SuppressRaggedEOFs(Setting): name = "suppress_ragged_eofs" section = "SSL" cli = ["--suppress-ragged-eofs"] action = "store_true" default = True validator = validate_bool desc = """\ Suppress ragged EOFs (see stdlib ssl module's) """ class DoHandshakeOnConnect(Setting): name = "do_handshake_on_connect" section = "SSL" cli = ["--do-handshake-on-connect"] validator = validate_bool action = "store_true" default = False desc = """\ Whether to perform SSL handshake on socket connect (see stdlib ssl module's) """ class Ciphers(Setting): name = "ciphers" section = "SSL" cli = ["--ciphers"] validator = validate_string default = None desc = """\ SSL Cipher suite to use, in the format of an OpenSSL cipher list. By default we use the default cipher list from Python's ``ssl`` module, which contains ciphers considered strong at the time of each Python release. As a recommended alternative, the Open Web App Security Project (OWASP) offers `a vetted set of strong cipher strings rated A+ to C- <https://www.owasp.org/index.php/TLS_Cipher_String_Cheat_Sheet>`_. OWASP provides details on user-agent compatibility at each security level. See the `OpenSSL Cipher List Format Documentation <https://www.openssl.org/docs/manmaster/man1/ciphers.html#CIPHER-LIST-FORMAT>`_ for details on the format of an OpenSSL cipher list. """ class PasteGlobalConf(Setting): name = "raw_paste_global_conf" action = "append" section = "Server Mechanics" cli = ["--paste-global"] meta = "CONF" validator = validate_list_string default = [] desc = """\ Set a PasteDeploy global config variable in ``key=value`` form. The option can be specified multiple times. The variables are passed to the the PasteDeploy entrypoint. Example:: $ gunicorn -b 127.0.0.1:8000 --paste development.ini --paste-global FOO=1 --paste-global BAR=2 .. versionadded:: 19.7 """ class StripHeaderSpaces(Setting): name = "strip_header_spaces" section = "Server Mechanics" cli = ["--strip-header-spaces"] validator = validate_bool action = "store_true" default = False desc = """\ Strip spaces present between the header name and the the ``:``. This is known to induce vulnerabilities and is not compliant with the HTTP/1.1 standard. See https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn. Use with care and only if necessary. """
{header}i request header {header}o response header {variable}e environment variable
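The Setting subclasses above make up Gunicorn's configuration surface (config.py). As a quick illustrative sketch — not part of the original file — a gunicorn.conf.py could set several of them; the concrete values below are assumptions chosen only for demonstration, and the hook signature follows the arity checked by validate_callable above.

# Hypothetical gunicorn.conf.py sketch; the setting names come from the
# Setting classes above, the values are illustrative only.
bind = ["127.0.0.1:8000"]      # Bind: HOST:PORT, unix:PATH or fd://FD
workers = 4                    # Workers: number of worker processes
worker_class = "gthread"       # WorkerClass: sync, gthread, gevent, eventlet, tornado
threads = 2                    # WorkerThreads: only used by the gthread worker
accesslog = "-"                # AccessLog: '-' logs to stdout
loglevel = "info"              # Loglevel: debug/info/warning/error/critical
max_requests = 1000            # MaxRequests: restart a worker after this many requests
max_requests_jitter = 50       # MaxRequestsJitter: stagger those restarts


def pre_fork(server, worker):
    # Prefork hook: called with the Arbiter and the new Worker, per the
    # validate_callable(2) validator above.
    pass

Such a file would typically be picked up automatically (the ConfigFile default is ./gunicorn.conf.py) or passed explicitly, e.g. gunicorn -c gunicorn.conf.py module:app, following the WSGIApp MODULE_NAME:VARIABLE_NAME pattern described above.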
21_Merge_Two_Sorted_List.py
#! python3
# __author__ = "YangJiaHao"
# date: 2018/2/1


# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def mergeTwoLists(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        head = ListNode(0)
        node = head
        while l1 and l2:
            if l2.val <= l1.val:
                node.next = l2
                l2 = l2.next
            else:
                node.next = l1
                l1 = l1.next
            node = node.next
        node.next = l1 if l1 else l2
        return head.next


class
:
    def mergeTwoLists(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        if l2.val >= l1.val:
            head = l1
            head.next = self.mergeTwoLists(l1.next, l2)
        else:
            head = l2
            head.next = self.mergeTwoLists(l1, l2.next)
        return head


if __name__ == '__main__':
    pass
Solution2
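Both Solution (iterative, using a dummy head node) and Solution2 (recursive, the missing class name above) merge two sorted linked lists into one. A small driver — purely illustrative, with made-up helper names build_list and to_pylist — shows how either class could be exercised:

# Illustrative driver, not part of the original solution file.
def build_list(values):
    # Build a linked list of ListNode objects from a Python list.
    head = ListNode(0)
    node = head
    for v in values:
        node.next = ListNode(v)
        node = node.next
    return head.next


def to_pylist(node):
    # Collect linked-list values back into a Python list.
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out


merged = Solution().mergeTwoLists(build_list([1, 2, 4]), build_list([1, 3, 4]))
print(to_pylist(merged))  # expected: [1, 1, 2, 3, 4, 4]
# Solution2().mergeTwoLists(...) produces the same result recursively.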
frozen.rs
use std::convert::TryFrom; use std::net; use std::rc::Rc; use std::time::Duration; use bytes::Bytes; use futures_core::Stream; use serde::Serialize; use actix_http::body::Body; use actix_http::http::header::IntoHeaderValue; use actix_http::http::{Error as HttpError, HeaderMap, HeaderName, Method, Uri}; use actix_http::{Error, RequestHead}; use crate::sender::{RequestSender, SendClientRequest}; use crate::ClientConfig; /// `FrozenClientRequest` struct represents clonable client request. /// It could be used to send same request multiple times. #[derive(Clone)] pub struct FrozenClientRequest { pub(crate) head: Rc<RequestHead>, pub(crate) addr: Option<net::SocketAddr>, pub(crate) response_decompress: bool, pub(crate) timeout: Option<Duration>, pub(crate) config: Rc<ClientConfig>, } impl FrozenClientRequest { /// Get HTTP URI of request pub fn get_uri(&self) -> &Uri { &self.head.uri } /// Get HTTP method of this request pub fn get_method(&self) -> &Method { &self.head.method } /// Returns request's headers. pub fn headers(&self) -> &HeaderMap { &self.head.headers } /// Send a body. pub fn send_body<B>(&self, body: B) -> SendClientRequest where B: Into<Body>, { RequestSender::Rc(self.head.clone(), None).send_body( self.addr, self.response_decompress, self.timeout, self.config.as_ref(), body, ) } /// Send a json body. pub fn send_json<T: Serialize>(&self, value: &T) -> SendClientRequest { RequestSender::Rc(self.head.clone(), None).send_json( self.addr, self.response_decompress, self.timeout, self.config.as_ref(), value, ) } /// Send an urlencoded body. pub fn send_form<T: Serialize>(&self, value: &T) -> SendClientRequest { RequestSender::Rc(self.head.clone(), None).send_form( self.addr, self.response_decompress, self.timeout, self.config.as_ref(), value, ) } /// Send a streaming body. pub fn send_stream<S, E>(&self, stream: S) -> SendClientRequest where S: Stream<Item = Result<Bytes, E>> + Unpin + 'static, E: Into<Error> + 'static, { RequestSender::Rc(self.head.clone(), None).send_stream( self.addr, self.response_decompress, self.timeout, self.config.as_ref(), stream, ) } /// Send an empty body. pub fn
(&self) -> SendClientRequest { RequestSender::Rc(self.head.clone(), None).send( self.addr, self.response_decompress, self.timeout, self.config.as_ref(), ) } /// Create a `FrozenSendBuilder` with extra headers pub fn extra_headers(&self, extra_headers: HeaderMap) -> FrozenSendBuilder { FrozenSendBuilder::new(self.clone(), extra_headers) } /// Create a `FrozenSendBuilder` with an extra header pub fn extra_header<K, V>(&self, key: K, value: V) -> FrozenSendBuilder where HeaderName: TryFrom<K>, <HeaderName as TryFrom<K>>::Error: Into<HttpError>, V: IntoHeaderValue, { self.extra_headers(HeaderMap::new()) .extra_header(key, value) } } /// Builder that allows to modify extra headers. pub struct FrozenSendBuilder { req: FrozenClientRequest, extra_headers: HeaderMap, err: Option<HttpError>, } impl FrozenSendBuilder { pub(crate) fn new(req: FrozenClientRequest, extra_headers: HeaderMap) -> Self { Self { req, extra_headers, err: None, } } /// Insert a header, it overrides existing header in `FrozenClientRequest`. pub fn extra_header<K, V>(mut self, key: K, value: V) -> Self where HeaderName: TryFrom<K>, <HeaderName as TryFrom<K>>::Error: Into<HttpError>, V: IntoHeaderValue, { match HeaderName::try_from(key) { Ok(key) => match value.try_into() { Ok(value) => self.extra_headers.insert(key, value), Err(e) => self.err = Some(e.into()), }, Err(e) => self.err = Some(e.into()), } self } /// Complete request construction and send a body. pub fn send_body<B>(self, body: B) -> SendClientRequest where B: Into<Body>, { if let Some(e) = self.err { return e.into(); } RequestSender::Rc(self.req.head, Some(self.extra_headers)).send_body( self.req.addr, self.req.response_decompress, self.req.timeout, self.req.config.as_ref(), body, ) } /// Complete request construction and send a json body. pub fn send_json<T: Serialize>(self, value: &T) -> SendClientRequest { if let Some(e) = self.err { return e.into(); } RequestSender::Rc(self.req.head, Some(self.extra_headers)).send_json( self.req.addr, self.req.response_decompress, self.req.timeout, self.req.config.as_ref(), value, ) } /// Complete request construction and send an urlencoded body. pub fn send_form<T: Serialize>(self, value: &T) -> SendClientRequest { if let Some(e) = self.err { return e.into(); } RequestSender::Rc(self.req.head, Some(self.extra_headers)).send_form( self.req.addr, self.req.response_decompress, self.req.timeout, self.req.config.as_ref(), value, ) } /// Complete request construction and send a streaming body. pub fn send_stream<S, E>(self, stream: S) -> SendClientRequest where S: Stream<Item = Result<Bytes, E>> + Unpin + 'static, E: Into<Error> + 'static, { if let Some(e) = self.err { return e.into(); } RequestSender::Rc(self.req.head, Some(self.extra_headers)).send_stream( self.req.addr, self.req.response_decompress, self.req.timeout, self.req.config.as_ref(), stream, ) } /// Complete request construction and send an empty body. pub fn send(self) -> SendClientRequest { if let Some(e) = self.err { return e.into(); } RequestSender::Rc(self.req.head, Some(self.extra_headers)).send( self.req.addr, self.req.response_decompress, self.req.timeout, self.req.config.as_ref(), ) } }
send
apollo-client.ts
import { useMemo } from 'react' import { ApolloClient, createHttpLink, InMemoryCache, NormalizedCacheObject, from } from '@apollo/client' import { concatPagination } from '@apollo/client/utilities' import { setContext } from '@apollo/client/link/context' import merge from 'deepmerge' import isEqual from 'lodash/isEqual' export const APOLLO_STATE_PROP_NAME = '__APOLLO_STATE__' let apolloClient: ApolloClient<NormalizedCacheObject> let accessToken = '' function getHttpLink() { return createHttpLink({ uri: process.env.GRAPHQL_URL, credentials: 'include', }) } function getAuthLink(token: string = '') { return setContext((_, { headers }) => { return { headers: { ...headers, authorization: !!token?`Bearer ${token}`:'', } }
function createApolloClient(token: string = '') { return new ApolloClient({ ssrMode: typeof window === 'undefined', link: from([getAuthLink(token), getHttpLink()]), cache: new InMemoryCache({ typePolicies: { Query: { fields: { allPosts: concatPagination(), }, }, }, }), defaultOptions: { query: { fetchPolicy: 'no-cache' }, }, }) } let count = 0 export function initializeApollo(token: string = '', initialState: NormalizedCacheObject = null) { const _apolloClient = token===''?null:createApolloClient(token) if (initialState && token !== '') { const existingCache = _apolloClient.extract() // Merge the existing cache into data passed from getStaticProps/getServerSideProps const data = merge(initialState, existingCache, { arrayMerge: (destinationArray, sourceArray) => [ ...sourceArray, ...destinationArray.filter((d) => sourceArray.every((s) => !isEqual(d, s)) ), ], }) // Restore the cache with the merged data _apolloClient.cache.restore(data) } if (typeof window === 'undefined') return _apolloClient if (!!_apolloClient) apolloClient = _apolloClient return apolloClient } export function addApolloState(client: ApolloClient<NormalizedCacheObject>, pageMetadatas) { if (pageMetadatas?.props) { pageMetadatas.props[APOLLO_STATE_PROP_NAME] = client.cache.extract() } return pageMetadatas } export function useApollo(pageProps, token: string) { const state: NormalizedCacheObject = pageProps[APOLLO_STATE_PROP_NAME] const store = initializeApollo(token, state) return store }
}) }
test.py
import os import torch import numpy as np from torchvision import transforms from torch import nn from torch.nn import Softmax from facenet_pytorch import MTCNN from PIL import Image import matplotlib.pyplot as plt from loadOpenFace import prepareOpenFace from collections import OrderedDict import argparse # Check if CUDA GPU is available useCuda = torch.cuda.is_available() if useCuda: print('CUDA is avialable') device = torch.device('cuda:0') else: print('CUDA is not avialable') device = torch.device('cpu') def load_model_from_chk(chk_path): '''Returns model and idx_to_class dictionary''' try: # Load checkpoint checkpoint = torch.load(chk_path, map_location=torch.device('cpu')) idx_to_class = checkpoint['idx_to_class'] # Load the inception model model = prepareOpenFace(useCuda) model.eval() n_classes = len(idx_to_class) # Initialize the classifier model classifier_model = nn.Sequential(OrderedDict([ ("nn4_small_v2", model), ("fc", nn.Linear(736, n_classes)) ])) # load the trained parameters classifier_model.load_state_dict(checkpoint['model_state_dict']) print("Model Loaded from %s" % chk_path) return classifier_model, idx_to_class except FileNotFoundError: print("Model checkpoint not found %s" % chk_path) return None # Load mtcnn to align and crop images mtcnn = MTCNN( image_size=160, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=False, device=device ) # tranfomation applied to croped image face_transform = transforms.Compose([transforms.Resize(96), transforms.ToTensor()]) softmax = Softmax(dim=1) # Load the model chk_path = 'models/AvengersClassifier.pth' classifier_model, idx_to_class = load_model_from_chk(chk_path) classifier_model = classifier_model.to(device) classifier_model.eval() def predict(img_path, prob_theshold = 0.9): try: img = Image.open(img_path) except FileNotFoundError: return # Crop, Align and standardize the Image mtcnn_img = mtcnn(img.convert('RGB')) # If no face then return if mtcnn_img is None: plt.show() print("ERROR, Could not detect a face in image") return # Convert to PIL image mtcnn_img = Image.fromarray(np.array(mtcnn_img.permute(1, 2, 0).numpy(), dtype=np.uint8)) # Do the Prediction mtcnn_img = face_transform(mtcnn_img).unsqueeze(0) mtcnn_img = mtcnn_img.to(device)
    label = softmax(label)  # To Convert the logit to probabilities
    prob, pred = label.data.max(1, keepdim=True)
    prob, pred = float(prob), int(pred)

    if prob < prob_theshold:
        print("UNKNOWN FACE, but similar to %s with %0.2f%% probability"
              % (idx_to_class[pred], 100 * prob))
    else:
        print("%s with %0.2f%% probability"
              % (idx_to_class[pred], 100 * prob))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Takes in image path and does prediction')
    parser.add_argument('-p', '--path', help='Image path')
    args = parser.parse_args()
    img_path = args.path

    print()
    predict(img_path)
with torch.no_grad(): label = classifier_model(mtcnn_img)
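# ---------------------------------------------------------------------------
# Added illustration (not part of the original test.py): load_model_from_chk()
# above expects a checkpoint dict containing the keys 'model_state_dict' and
# 'idx_to_class'. The sketch below shows, assuming a hypothetical training
# script, how such a checkpoint could be written so the loader can read it
# back. The 736-dimensional embedding size and the file path mirror the loader
# above; save_checkpoint_sketch, embedding_model and class_names are
# illustrative names, not part of the original code.
import torch
from torch import nn
from collections import OrderedDict


def save_checkpoint_sketch(embedding_model, class_names,
                           path='models/AvengersClassifier.pth'):
    """Write a checkpoint in the format that load_model_from_chk() reads."""
    idx_to_class = dict(enumerate(class_names))
    classifier_model = nn.Sequential(OrderedDict([
        ("nn4_small_v2", embedding_model),         # OpenFace embedding network
        ("fc", nn.Linear(736, len(class_names))),  # classification head
    ]))
    torch.save({
        'model_state_dict': classifier_model.state_dict(),
        'idx_to_class': idx_to_class,
    }, path)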
main.rs
// I don't like this rule because it changes the semantic // structure of the code. #![allow(clippy::collapsible_else_if)] extern crate lazy_static; extern crate libc_stdhandle; use std::fs::File; use std::io; use std::io::Read; use std::path::PathBuf; use clap::Parser; use termion::cursor::HideCursor; use termion::input::MouseTerminal; use termion::raw::IntoRawMode; use termion::screen::AlternateScreen; mod app; mod flatjson; mod highlighting; mod input; mod jsonparser; mod jsontokenizer; mod lineprinter; mod options; mod screenwriter; mod search; mod terminal; mod truncatedstrview; mod types; mod viewer; mod yamlparser; use app::App; use options::{DataFormat, Opt}; fn main() { let opt = Opt::parse(); let (input_string, input_filename) = match get_input_and_filename(&opt) { Ok(input_and_filename) => input_and_filename, Err(err) => { eprintln!("Unable to get input: {}", err); std::process::exit(1); } }; let data_format = determine_data_format(opt.data_format(), &input_filename); if !isatty::stdout_isatty() { print_pretty_printed_input(input_string, data_format); std::process::exit(0); } // We use freopen to remap /dev/tty to STDIN so that rustyline works when // JSON input is provided via STDIN. rustyline gets initialized when we // create the App, so by putting this before creating the app, we make // sure rustyline gets the /dev/tty input. input::remap_dev_tty_to_stdin(); let stdout = MouseTerminal::from(HideCursor::from(AlternateScreen::from( io::stdout().into_raw_mode().unwrap(), ))); let mut app = match App::new( &opt, input_string, data_format, input_filename, Box::new(stdout), ) { Ok(jl) => jl, Err(err) => { eprintln!("{}", err); return; } }; app.run(Box::new(input::get_input())); } fn print_pretty_printed_input(input: String, data_format: DataFormat) { // Don't try to pretty print YAML input; just pass it through. if data_format == DataFormat::Yaml { print!("{}", input); return; } let flatjson = match flatjson::parse_top_level_json(input) { Ok(flatjson) => flatjson, Err(err) => { eprintln!("Unable to parse input: {:?}", err); std::process::exit(1); } }; print!("{}", flatjson.pretty_printed().unwrap()); } fn get_input_and_filename(opt: &Opt) -> io::Result<(String, String)>
fn determine_data_format(format: Option<DataFormat>, filename: &str) -> DataFormat { format.unwrap_or_else(|| { match std::path::Path::new(filename) .extension() .and_then(std::ffi::OsStr::to_str) { Some("yml") | Some("yaml") => DataFormat::Yaml, _ => DataFormat::Json, } }) }
{ let mut input_string = String::new(); let filename; match &opt.input { None => { if isatty::stdin_isatty() { println!("Missing filename (\"jless --help\" for help)"); std::process::exit(1); } filename = "STDIN".to_string(); io::stdin().read_to_string(&mut input_string)?; } Some(path) => { if *path == PathBuf::from("-") { filename = "STDIN".to_string(); io::stdin().read_to_string(&mut input_string)?; } else { File::open(path)?.read_to_string(&mut input_string)?; filename = String::from(path.file_name().unwrap().to_string_lossy()); } } } Ok((input_string, filename)) }
copymodetable.go
// Copyright 2019-2020 Graham Clark. All rights reserved. Use of this source // code is governed by the MIT license that can be found in the LICENSE // file. // Package copymodetable provides a wrapper around a table that supports copy mode. // The implementation currently supports clipping a whole row and also the whole // table by providing these as interfaces to the New function. It's easy to imagine // supporting narrowing the copy selection to a single column, but I don't need // that yet... package copymodetable import ( "github.com/gcla/gowid" "github.com/gcla/gowid/widgets/list"
lru "github.com/hashicorp/golang-lru" ) //====================================================================== type IRowCopier interface { CopyRow(id table.RowId) []gowid.ICopyResult } type ITableCopier interface { CopyTable() []gowid.ICopyResult } type ICopyModeTableNeeds interface { gowid.IWidget list.IWalker table.IGoToMiddle withscrollbar.IScrollOneLine withscrollbar.IScrollOnePage CurrentRow() int SetCurrentRow(table.Position) Model() table.IModel SetModel(table.IModel, gowid.IApp) Cache() *lru.Cache OnFocusChanged(gowid.IWidgetChangedCallback) } type Widget struct { ICopyModeTableNeeds RowClip IRowCopier // Knows how to make a clip result set given a row AllClip ITableCopier // Knows how to make a clip result set from the whole table name string // for widget "id" clip gowid.IClipboardSelected // function to modify selected widget for copying } type idstring string // Needed to satisfy copy mode func (i idstring) ID() interface{} { return i } func New(wrapped ICopyModeTableNeeds, rowClip IRowCopier, allClip ITableCopier, name string, clip gowid.IClipboardSelected) *Widget { return &Widget{ ICopyModeTableNeeds: wrapped, RowClip: rowClip, AllClip: allClip, name: name, clip: clip, } } func (w *Widget) Render(size gowid.IRenderSize, focus gowid.Selector, app gowid.IApp) gowid.ICanvas { if app.InCopyMode() && app.CopyModeClaimedBy().ID() == w.ID() && focus.Focus { row := w.CurrentRow() if app.CopyModeClaimedAt() == 0 { row = -1 // all rows } origModel := w.Model() model := copyModeTableModel{ IModel: origModel, clip: w.clip, app: app, row: row, } w.SetModel(model, app) res := w.ICopyModeTableNeeds.Render(size, focus, app) w.SetModel(origModel, app) return res } else { return w.ICopyModeTableNeeds.Render(size, focus, app) } } // The app stores which widget claims copy mode, and so each widget must check whether it's the // one when it render itself. func (w *Widget) ID() interface{} { return idstring(w.name) } func (w *Widget) SubWidget() gowid.IWidget { return w.ICopyModeTableNeeds } func (w *Widget) CopyModeLevels() int { return 1 // one row, all rows } func (w *Widget) UserInput(ev interface{}, size gowid.IRenderSize, focus gowid.Selector, app gowid.IApp) bool { return gowid.CopyModeUserInput(w, ev, size, focus, app) } func (w *Widget) Clips(app gowid.IApp) []gowid.ICopyResult { // 1 is whole table // 0 is just row diff := w.CopyModeLevels() - (app.CopyModeClaimedAt() - app.CopyLevel()) var rd []gowid.ICopyResult if diff == 0 { cur := w.CurrentRow() rid, ok := w.Model().RowIdentifier(cur) if ok { rd = w.RowClip.CopyRow(rid) } } else { rd = w.AllClip.CopyTable() } return rd } //====================================================================== // copyModeTableModel exists solely to provide an "overridden" implementation of CellWidgets e.g. to color the // selected row yellow. To do this, it needs clip for the AlterWidget function, and the row to alter (or // all). This model is set on the underlying table before Render() is called on the underlying table. type copyModeTableModel struct { table.IModel clip gowid.IClipboardSelected app gowid.IApp row int } var _ table.IModel = copyModeTableModel{} func (c copyModeTableModel) CellWidgets(row table.RowId) []gowid.IWidget { res := c.IModel.CellWidgets(row) dothisrow := false if c.row == -1 { dothisrow = true // do every row i.e. 
every call to CellWidgets() } else { rid, ok := c.IModel.RowIdentifier(c.row) if ok && (row == rid) { dothisrow = true } } if dothisrow { for col := 0; col < len(res); col++ { res[col] = c.clip.AlterWidget(res[col], c.app) } } return res } //====================================================================== // Local Variables: // mode: Go // fill-column: 110 // End:
"github.com/gcla/gowid/widgets/table" "github.com/gcla/termshark/v2/widgets/withscrollbar"
qgis3script-importernvdbdata.py
# -*- coding: utf-8 -*-
"""
Script for interactively adding NVDB road networks and feature data via the Python command line in QGIS.

See the documentation for the nvdbapi functions at https://github.com/LtGlahn/nvdbapi-V3

Put this script somewhere that is easy to reach from QGIS, e.g. C:/Users/<yourusername>.

EXAMPLES

# Road network, European highways, Trondheim municipality
v = nvdbVegnett()
v.addfilter_geo({ 'kommune' : 1601, 'vegreferanse' : 'E' })
nvdbsok2qgis( v, lagnavn='Europaveger Trondheim')

# Road network within the current map extent
v = nvdbVegnett()
nvdb2kart( v, iface)

# Toll stations
b = nvdbFagdata(45)
nvdbsok2qgis( b)

# Search for speed limits within the map canvas, add to the map
f = nvdbFagdata(105)
nvdb2kart( f, iface)

# Search for a known object ID, add to the map canvas
nvdb2kart( 572672190, iface )

"""
import sys

###########################################################
##
## Change the path to the folder where you downloaded this
## repository: https://github.com/LtGlahn/nvdbapi-V3
##
## Note that if you download the repository as a zip file and right-click -> Extract all,
## the path will be DOWNLOADS\\nvdbapi-V3-master\\nvdbapi-V3-master
##
# nvdblibrary = 'C:/Data/leveranser/nvdbapi-V3'
nvdblibrary = 'C:\\Users\\jajens\\Downloads\\nvdbapi-V3-master\\nvdbapi-V3-master'
# nvdblibrary = 'C:\\Users\\<YOUR USERNAME>\\Downloads\\nvdbapi-V3-master\\nvdbapi-V3-master'
# nvdblibrary = '/home/jan/Documents/jobb/nvdbapi-V3'

## If we cannot import nvdbapiv3 we try to append
## the nvdblibrary folder to the search path.
try:
    import nvdbapiv3
except ModuleNotFoundError:
    print( "Could not find nvdbapiv3 in sys.path, adding the folder", nvdblibrary)
    sys.path.append( nvdblibrary )

    try:
        import nvdbapiv3
    except ModuleNotFoundError as e:
        print( "\nImport of nvdbapiv3 fails for", nvdblibrary )
        raise ModuleNotFoundError( "==> The variable nvdblibrary must point to the folder https://github.com/LtGlahn/nvdbapi-V3 <==" )
    else:
        print( "SUCCESS - can import nvdbapiv3 after adding", nvdblibrary, "to sys.path" )
else:
    print( "HURRAY - we can import nvdbapiv3 " )

## Here we import the functions we need
from nvdbapiv3 import nvdbFagdata, nvdbVegnett
from nvdbapiV3qgis3 import nvdb2kart, nvdbsok2qgis, url2kart, nvdb2kartListe

## Use the lines below for debugging
## The function calls in the QGIS Python console then become
##
## >>> sok = nvdbapiv3.nvdbFagdata(86)
## >>> nvdbapiV3qgis3.nvdb2kart( sok, iface )
##
# import importlib
# import nvdbapiV3qgis3
# import nvdbapiv3
# importlib.reload(nvdbapiV3qgis3 )
# importlib.reload(nvdbapiv3 )
newLemonchiffonTransparent.go
package factoryColorNames import "image/color" func NewLemonchiffonTransparent() color.RGBA
{ return color.RGBA{R: 0xff, G: 0xfa, B: 0xcd, A: 0x00} // rgb(255, 250, 205) }
karma.conf.js
// Karma configuration // Generated on Tue Sep 01 2015 12:55:42 GMT+0200 (CEST) module.exports = function(config) { config.set({ // base path that will be used to resolve all patterns (eg. files, exclude) basePath: '', // frameworks to use // available frameworks: https://npmjs.org/browse/keyword/karma-adapter frameworks: ['jasmine', 'jasmine-sinon'], // list of files / patterns to load in the browser
files: [ 'lib/*.js', 'tests/*.js' ], // list of files to exclude exclude: [ ], // preprocess matching files before serving them to the browser // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor preprocessors: { }, // test results reporter to use // possible values: 'dots', 'progress' // available reporters: https://npmjs.org/browse/keyword/karma-reporter reporters: ['progress'], // web server port port: 9876, // enable / disable colors in the output (reporters and logs) colors: true, // level of logging // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG logLevel: config.LOG_INFO, // enable / disable watching file and executing tests whenever any file changes autoWatch: true, // start these browsers // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher browsers: ['PhantomJS', 'Firefox', 'Chrome'], // Continuous Integration mode // if true, Karma captures browsers, runs the tests and exits singleRun: false }) }
gracebot.py
import logging from collections import Counter, defaultdict import aiogram from aiogram import Bot, types from aiogram.utils.emoji import emojize from detector import Detector from gwevents import Events, time_ago from keyboard import InlineKeyboard from permanentset import PermanentSet class GraceBot(Bot): def __init__(self, token: str): super().__init__(token=token) self.events: Events = Events() self.events.update_all() self.event_keyboards: dict = defaultdict(InlineKeyboard) self.new_event_messages_send: PermanentSet = PermanentSet( "new_event_messages_send.txt", str ) self.subscribers: PermanentSet = PermanentSet("subscribers.txt", int) self.event_types: dict = { # Probability that the source is a binary black hole merger (both # objects heavier than 5 solar masses) "BBH": "binary black hole merger", # Probability that the source is a binary neutron star merger # (both objects lighter than 3 solar masses) "BNS": "binary neutron star merger", # Probability that the source is a neutron star-black hole merger # (primary heavier than 5 solar masses, secondary lighter than 3 # solar masses) "NSBH": "neutron star black hole merger", # Probability that the source is terrestrial(i.e., a background # noise fluctuation or a glitch) "Terrestrial": "terrestrial", # Probability that the source has at least one object between 3 and # 5 solar masses "MassGap": "mass gap", } async def send_preliminary(self, message): event_id = event_id_from_message(message) logging.info(f"Event to update from preliminary message: {event_id}") if event_id in self.new_event_messages_send.data: return else: self.events.update_events_last_week() self.new_event_messages_send.add(event_id) text = f"A new event has been measured!\n\n" await self._send_event_info_to_all_users(event_id, text) async def send_update(self, message): event_id = event_id_from_message(message) self.events.update_single(event_id) text = f"Event {event_id} has been updated.\n\n" await self._send_event_info_to_all_users(event_id, text) async def send_retraction(self, message): event_id = event_id_from_message(message) text = f"Event {event_id} has been retracted. The event details were:\n\n" await self._send_event_info_to_all_users(event_id, text) self.events.update_all() async def _send_event_info_to_all_users(self, event_id: str, pre_text: str) -> None: for user_id in self.subscribers.data: try: await self.send_event_info(user_id, event_id, pre_text) except aiogram.utils.exceptions.BotBlocked: logging.info(f"User {user_id} has blocked the bot.") continue async def send_event_info( self, chat_id: str, event_id: str, pre_text: str = "" ) -> None: """ Send information of a specific event to the user. Parameters ---------- chat_id : str Where to send the message to. event_id : str The event to send the information about. pre_text : str Will be added to the beginning of the message. Returns ------- None """ try: event = self.events.data[event_id] except KeyError: logging.error(f"Warning couldn't find event with id {event_id}") return link = f"https://gracedb.ligo.org/superevents/{event_id}/view/" text = ( pre_text + f"*{event_id.upper()}*\n" + f"{time_ago(event['created'])}\n\n" ) try: event_type = self.events.get_likely_event_type(event_id) confidence = self.events.data[event_id]["event_types"][event_type] text += ( f"Unconfirmed {self.event_types[event_type]} ({confidence:.2%}) event." 
) distance_mean = round(event["distance_mean_Mly"] / 1000, 2) distance_std = round(event["distance_std_Mly"] / 1000, 2) text = ( text[:-1] + f" at {distance_mean} ± {distance_std} billion light years." ) instruments = self.events.data[event_id]["instruments_long"] text += f" The event was measured by {inline_list(instruments)}." except KeyError: pass text += f"\n\n[Event page]({link})" await self.send_message(chat_id, text, parse_mode="markdown") try: with open(self.events.picture(event_id), "rb") as picture: await self.send_photo(chat_id, picture) except FileNotFoundError: logging.error("Couldn't find the event image") return None async def send_welcome_message(self, message: types.Message) -> None: """ Send a welcome message to the user. Parameters ---------- message : aiogram.types.Message The message send by the user. Returns ------- None. """ text = ( "Stay up-to-date on LIGO/Virgo gravitational wave events!\n" "\n" "You can /subscribe to automatically receive a message whenever a new event is " "measured, or an existing event is updated. Use /unsubscribe to stop receiving " "messages.\n" "\n" "Furthermore you can check out the /latest event, or select a past /event. " "Use /stats to see and overview of all O3 events or view the live detector /status." ) await self.send_message(message.chat.id, text) async def send_latest(self, message: types.Message) -> None: """ Send some details of the most recent gravitational wave event. Parameters ---------- message : aiogram.types.Message The message send by the user. Returns ------- None. """ event_id = list(self.events.latest)[0] await self.send_event_info(message.chat.id, event_id) @property def event_keys(self) -> list: return [f"{id}_{info['most_likely']}" for id, info in self.events.data.items()] async def send_event_selector(self, message: types.Message) -> None: """ User can select any event from the O3 run and get a message with the details. Parameters ---------- message : types.Message Returns ------- None """ self.event_keyboards[message.chat.id] = InlineKeyboard( self.event_keys, rows=4, columns=2 ) await self.send_message( chat_id=message.chat.id, text="Select the event you want to see the details of.", reply_markup=self.event_keyboards[message.chat.id], ) async def event_selector_callback_handler(self, query: types.CallbackQuery) -> None: """ This is called when the user presses a button to select an event. Parameters ---------- query : types.CallbackQuery Callback query which contains info on which message the InlineKeyboard is attached to. Returns ------- None """ await query.answer() # send answer to close the rounding circle answer_data = query.data logging.debug(f"answer_data={answer_data}") user_id = query.from_user.id valid_event_ids = self.event_keyboards[user_id].visible_keys if answer_data in valid_event_ids: event_id, _ = answer_data.split("_") await self.send_event_info(user_id, event_id) else: await self.event_keyboards[user_id].update(query) async def send_o3_stats(self, message: types.Message) -> None: """ Send some statistics of observational run 3 (O3). Parameters ---------- message : aiogram.types.Message The message send by the user. Returns ------- None. """ # TODO take confirmed from other source since it will not be updated # in graceDB if they are confirmed. 
For that use: # https://www.gw-openscience.org/catalog/GWTC-1-confident/html/ event_counter = Counter( [info["most_likely"] for info in self.events.data.values()] ) unconfirmed_bbh = event_counter["BBH"] unconfirmed_bns = event_counter["BNS"] unconfirmed_nsbh = event_counter["NSBH"] unconfirmed_mg = event_counter["MassGap"] terrestrial = event_counter["Terrestrial"] text = ( f"Observational run 3 has detected *{len(self.events.data)}* " "events since April 1st 2019.\n\n" "" "*Event types*\n" f"Binary black hole mergers: *{unconfirmed_bbh}*.\n" f"Binary neutron star mergers: *{unconfirmed_bns}*.\n" f"Neutron star black hole mergers: *{unconfirmed_nsbh}*\n" f"At least one object between 3 and 5 solar masses: *{unconfirmed_mg}*.\n" f"Likely terrestrial (false alarm): *{terrestrial}*.\n" ) await self.send_message(message.chat.id, text, parse_mode="markdown") async def send_detector_status(self, message: types.Message) -> None: """ Send status of all three detectors to the user. Parameters ---------- message : types.Message The message send by the user. Returns ------- None """ detectors = [Detector("Hanford"), Detector("Livingston"), Detector("Virgo")] detector_status = [] for detector in detectors: hours = detector.status_duration.days * 24 + ( detector.status_duration.seconds // 3600 ) minutes = (detector.status_duration.seconds % 3600) // 60 detector_status.append( f"{emojize(detector.status_icon)} {detector.name}: " f"{detector.status} {hours}h {minutes}m" ) text = "\n".join(detector_status) await self.send_message(message.chat.id, text) async def add_subscriber(self, message: types.Message) -> None: """ Add the user from the message to the subscriber list. Parameters ---------- message : aiogram.types.Message The message send by the user. Returns ------- None. """ user_id = message.chat.id if self.subscribers.is_in_list(user_id): await self.send_message(user_id, "You are already subscribed.") else: self.subscribers.add(message.chat.id) await self.send_message( user_id, "You will now receive the latest event updates." ) async def remove_subscriber(self, message: types.Message) -> None: "
def event_id_from_message(message: types.Message) -> str:
    """
    Return the event id, which is assumed to come right after the command.

    Parameters
    ----------
    message : aiogram.types.Message
        The message sent by the user.

    Returns
    -------
    The event id.
    """
    try:
        event_id = message.text.split(" ")[-1]
    except AttributeError:
        # message.text is None for non-text messages; there is no event id.
        event_id = None

    return event_id


def inline_list(items):
    if len(items) == 0:
        return ""
    elif len(items) == 1:
        return items[0]
    else:
        return ", ".join(items[:-1]) + f" and {items[-1]}"
""
        Remove the user who sent the message from the subscriber list.

        Parameters
        ----------
        message : aiogram.types.Message
            The message sent by the user.

        Returns
        -------
        None.

        """
        user_id = message.chat.id
        if not self.subscribers.is_in_list(user_id):
            await self.send_message(user_id, "You are not subscribed.")
        else:
            self.subscribers.remove(message.chat.id)
            await self.send_message(
                user_id, "You will no longer receive the latest event updates."
            )
raid.py
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ DRAC RAID specific methods """ from collections import defaultdict import math from futurist import periodics from ironic_lib import metrics_utils from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import units from ironic.common import exception from ironic.common.i18n import _ from ironic.common import raid as raid_common from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules import deploy_utils from ironic.drivers.modules.drac import common as drac_common from ironic.drivers.modules.drac import job as drac_job drac_exceptions = importutils.try_import('dracclient.exceptions') drac_constants = importutils.try_import('dracclient.constants') LOG = logging.getLogger(__name__) METRICS = metrics_utils.get_metrics_logger(__name__) _CURRENT_RAID_CONTROLLER_MODE = "RAIDCurrentControllerMode" _REQUESTED_RAID_CONTROLLER_MODE = "RAIDRequestedControllerMode" _EHBA_MODE = "Enhanced HBA" _RAID_MODE = "RAID" RAID_LEVELS = { '0': { 'min_disks': 1, 'max_disks': 1000, 'type': 'simple', 'overhead': 0 }, '1': { 'min_disks': 2, 'max_disks': 2, 'type': 'simple', 'overhead': 1 }, '5': { 'min_disks': 3, 'max_disks': 1000, 'type': 'simple', 'overhead': 1 }, '6': { 'min_disks': 4, 'max_disks': 1000, 'type': 'simple', 'overhead': 2 }, '1+0': { 'type': 'spanned', 'span_type': '1' }, '5+0': { 'type': 'spanned', 'span_type': '5' }, '6+0': { 'type': 'spanned', 'span_type': '6' } } def list_raid_controllers(node): """List the RAID controllers of the node. :param node: an ironic node object. :returns: a list of RAIDController objects from dracclient. :raises: DracOperationError on an error from python-dracclient. """ client = drac_common.get_drac_client(node) try: return client.list_raid_controllers() except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to get the list of RAID controllers ' 'for node %(node_uuid)s. Reason: %(error)s.', {'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def list_virtual_disks(node): """List the virtual disks of the node. :param node: an ironic node object. :returns: a list of VirtualDisk objects from dracclient. :raises: DracOperationError on an error from python-dracclient. """ client = drac_common.get_drac_client(node) try: return client.list_virtual_disks() except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to get the list of virtual disks ' 'for node %(node_uuid)s. Reason: %(error)s.', {'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def list_physical_disks(node): """List the physical disks of the node. :param node: an ironic node object. :returns: a list of PhysicalDisk objects from dracclient. :raises: DracOperationError on an error from python-dracclient. 
""" client = drac_common.get_drac_client(node) try: return client.list_physical_disks() except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to get the list of physical disks ' 'for node %(node_uuid)s. Reason: %(error)s.', {'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def _is_raid_controller(node, raid_controller_fqdd, raid_controllers=None): """Find out if object's fqdd is for a raid controller or not :param node: an ironic node object :param raid_controller_fqdd: The object's fqdd we are testing to see if it is a raid controller or not. :param raid_controllers: A list of RAIDControllers used to check for the presence of BOSS cards. If None, the iDRAC will be queried for the list of controllers. :returns: boolean, True if the device is a RAID controller, False if not. """ client = drac_common.get_drac_client(node) try: return client.is_raid_controller(raid_controller_fqdd, raid_controllers) except drac_exceptions.BaseClientException as exc: LOG.error('Unable to determine if controller %(raid_controller_fqdd)s ' 'on node %(node_uuid)s is a RAID controller. ' 'Reason: %(error)s. ', {'raid_controller_fqdd': raid_controller_fqdd, 'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def _validate_job_queue(node, raid_controller=None): """Validate that there are no pending jobs for this controller. :param node: an ironic node object. :param raid_controller: id of the RAID controller. """ kwargs = {} if raid_controller: kwargs["name_prefix"] = "Config:RAID:%s" % raid_controller drac_job.validate_job_queue(node, **kwargs) def create_virtual_disk(node, raid_controller, physical_disks, raid_level, size_mb, disk_name=None, span_length=None, span_depth=None): """Create a single virtual disk on a RAID controller. The created virtual disk will be in pending state. The DRAC card will do the actual configuration once the changes are applied by calling the ``commit_config`` method. :param node: an ironic node object. :param raid_controller: id of the RAID controller. :param physical_disks: ids of the physical disks. :param raid_level: RAID level of the virtual disk. :param size_mb: size of the virtual disk. :param disk_name: name of the virtual disk. (optional) :param span_depth: Number of spans in virtual disk. (optional) :param span_length: Number of disks per span. (optional) :returns: a dictionary containing the commit_needed key with a boolean value indicating whether a config job must be created for the values to be applied. :raises: DracOperationError on an error from python-dracclient. """ # This causes config to fail, because the boot mode is set via a config # job. _validate_job_queue(node, raid_controller) client = drac_common.get_drac_client(node) try: return client.create_virtual_disk(raid_controller, physical_disks, raid_level, size_mb, disk_name, span_length, span_depth) except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to create virtual disk for node ' '%(node_uuid)s. Reason: %(error)s.', {'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def delete_virtual_disk(node, virtual_disk): """Delete a single virtual disk on a RAID controller. The deleted virtual disk will be in pending state. The DRAC card will do the actual configuration once the changes are applied by calling the ``commit_config`` method. :param node: an ironic node object. :param virtual_disk: id of the virtual disk. 
:returns: a dictionary containing the commit_needed key with a boolean value indicating whether a config job must be created for the values to be applied. :raises: DracOperationError on an error from python-dracclient. """ # NOTE(mgoddard): Cannot specify raid_controller as we don't know it. _validate_job_queue(node) client = drac_common.get_drac_client(node) try: return client.delete_virtual_disk(virtual_disk) except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to delete virtual disk ' '%(virtual_disk_fqdd)s for node %(node_uuid)s. ' 'Reason: %(error)s.', {'virtual_disk_fqdd': virtual_disk, 'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def _reset_raid_config(node, raid_controller): """Delete all virtual disk and unassign all hotspares physical disk :param node: an ironic node object. :param raid_controller: id of the RAID controller. :returns: a dictionary containing - The is_commit_required needed key with a boolean value indicating whether a config job must be created for the values to be applied. - The is_reboot_required key with a RebootRequired enumerated value indicating whether the server must be rebooted to reset configuration. :raises: DracOperationError on an error from python-dracclient. """ try: _validate_job_queue(node, raid_controller) client = drac_common.get_drac_client(node) return client.reset_raid_config(raid_controller) except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to delete all virtual disk ' 'and unassign all hotspares ' 'on %(raid_controller_fqdd)s ' 'for node %(node_uuid)s. ' 'Reason: %(error)s.', {'raid_controller_fqdd': raid_controller, 'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def clear_foreign_config(node, raid_controller): """Free up the foreign drives. :param node: an ironic node object. :param raid_controller: id of the RAID controller. :returns: a dictionary containing - The is_commit_required needed key with a boolean value indicating whether a config job must be created for the values to be applied. - The is_reboot_required key with a RebootRequired enumerated value indicating whether the server must be rebooted to clear foreign configuration. :raises: DracOperationError on an error from python-dracclient. """ try: _validate_job_queue(node, raid_controller) client = drac_common.get_drac_client(node) return client.clear_foreign_config(raid_controller) except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to free foreign driver ' 'on %(raid_controller_fqdd)s ' 'for node %(node_uuid)s. ' 'Reason: %(error)s.', {'raid_controller_fqdd': raid_controller, 'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def set_raid_settings(node, controller_fqdd, settings): """Sets the RAID configuration It sets the pending_value parameter for each of the attributes passed in. For the values to be applied, a config job must be created. :param node: an ironic node object. :param controller_fqdd: the ID of the RAID controller. :param settings: a dictionary containing the proposed values, with each key being the name of attribute and the value being the proposed value. :returns: a dictionary containing: - The is_commit_required key with a boolean value indicating whether a config job must be created for the values to be applied. - The is_reboot_required key with a RebootRequired enumerated value indicating whether the server must be rebooted for the values to be applied. 
Possible values are true and false. :raises: DRACOperationFailed on error reported back by the DRAC interface """ try: drac_job.validate_job_queue(node) client = drac_common.get_drac_client(node) return client.set_raid_settings(controller_fqdd, settings) except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to set raid settings ' 'on %(raid_controller_fqdd)s ' 'for node %(node_uuid)s. ' 'Reason: %(error)s.', {'raid_controller_fqdd': controller_fqdd, 'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def list_raid_settings(node): """List the RAID configuration settings :param node: an ironic node object. :returns: a dictionary with the RAID settings using InstanceID as the key. The attributes are RAIDEnumerableAttribute, RAIDStringAttribute and RAIDIntegerAttribute objects. :raises: DRACOperationFailed on error reported back by the DRAC interface """ try: drac_job.validate_job_queue(node) client = drac_common.get_drac_client(node) return client.list_raid_settings() except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to list raid settings ' 'for node %(node_uuid)s. ' 'Reason: %(error)s.', {'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def change_physical_disk_state(node, mode=None, controllers_to_physical_disk_ids=None): """Convert disks RAID status This method converts the requested physical disks from RAID to JBOD or vice versa. It does this by only converting the disks that are not already in the correct state. :param node: an ironic node object. :param mode: the mode to change the disks either to RAID or JBOD. :param controllers_to_physical_disk_ids: Dictionary of controllers and corresponding disk ids to convert to the requested mode. :return: a dictionary containing: - conversion_results, a dictionary that maps controller ids to the conversion results for that controller. The conversion results are a dict that contains: - The is_commit_required key with the value always set to True indicating that a config job must be created to complete disk conversion. - The is_reboot_required key with a RebootRequired enumerated value indicating whether the server must be rebooted to complete disk conversion. :raises: DRACOperationError on an error from python-dracclient. """ try: drac_job.validate_job_queue(node) client = drac_common.get_drac_client(node) return client.change_physical_disk_state( mode, controllers_to_physical_disk_ids) except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to change physical drives ' 'to %(mode)s mode for node %(node_uuid)s. ' 'Reason: %(error)s.', {'mode': mode, 'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def commit_config(node, raid_controller, reboot=False, realtime=False): """Apply all pending changes on a RAID controller. :param node: an ironic node object. :param raid_controller: id of the RAID controller. :param reboot: indicates whether a reboot job should be automatically created with the config job. (optional, defaults to False) :param realtime: indicates RAID controller supports realtime. (optional, defaults to False) :returns: id of the created job :raises: DracOperationError on an error from python-dracclient. 
""" client = drac_common.get_drac_client(node) try: return client.commit_pending_raid_changes( raid_controller=raid_controller, reboot=reboot, realtime=realtime) except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to commit pending RAID config for' ' controller %(raid_controller_fqdd)s on node ' '%(node_uuid)s. Reason: %(error)s.', {'raid_controller_fqdd': raid_controller, 'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def _change_physical_disk_mode(node, mode=None, controllers_to_physical_disk_ids=None, substep="completed"): """Physical drives conversion from RAID to JBOD or vice-versa. :param node: an ironic node object. :param mode: the mode to change the disks either to RAID or JBOD. :param controllers_to_physical_disk_ids: Dictionary of controllers and corresponding disk ids to convert to the requested mode. :returns: states.CLEANWAIT if deletion is in progress asynchronously or None if it is completed. """ change_disk_state = change_physical_disk_state( node, mode, controllers_to_physical_disk_ids) controllers = list() conversion_results = change_disk_state['conversion_results'] for controller_id, result in conversion_results.items(): controller = {'raid_controller': controller_id, 'is_reboot_required': result['is_reboot_required'], 'is_commit_required': result['is_commit_required']} controllers.append(controller) return _commit_to_controllers( node, controllers, substep=substep) def abandon_config(node, raid_controller): """Deletes all pending changes on a RAID controller. :param node: an ironic node object. :param raid_controller: id of the RAID controller. :raises: DracOperationError on an error from python-dracclient. """ client = drac_common.get_drac_client(node) try: client.abandon_pending_raid_changes(raid_controller) except drac_exceptions.BaseClientException as exc: LOG.error('DRAC driver failed to delete pending RAID config ' 'for controller %(raid_controller_fqdd)s on node ' '%(node_uuid)s. Reason: %(error)s.', {'raid_controller_fqdd': raid_controller, 'node_uuid': node.uuid, 'error': exc}) raise exception.DracOperationError(error=exc) def _calculate_spans(raid_level, disks_count): """Calculates number of spans for a RAID level given a physical disk count :param raid_level: RAID level of the virtual disk. :param disk_count: number of physical disks used for the virtual disk. :returns: number of spans. """ if raid_level in ['0', '1', '5', '6']: return 1 elif raid_level in ['5+0', '6+0']: return 2 elif raid_level in ['1+0']: return disks_count >> 1 else: reason = (_('Cannot calculate spans for RAID level "%s"') % raid_level) raise exception.DracOperationError(error=reason) def _usable_disks_count(raid_level, disks_count): """Calculates the number of disks usable for a RAID level ...given a physical disk count :param raid_level: RAID level of the virtual disk. :param disk_count: number of physical disks used for the virtual disk. :returns: number of disks. """ if raid_level in ['0', '1', '5', '6']: return disks_count elif raid_level in ['5+0', '6+0', '1+0']: # largest even number less than disk_count return (disks_count >> 1) << 1 else: reason = (_('RAID level %(raid_level)s is not supported by the ' 'driver. 
Supported RAID levels: %(supported_raid_levels)s') % {'raid_level': raid_level, 'supported_raid_levels': list(RAID_LEVELS)}) raise exception.DracOperationError(error=reason) def _raid_level_min_disks(raid_level, spans_count=1): try: raid_level_info = RAID_LEVELS[raid_level] except KeyError: reason = (_('RAID level %(raid_level)s is not supported by the ' 'driver. Supported RAID levels: %(supported_raid_levels)s') % {'raid_level': raid_level, 'supported_raid_levels': list(RAID_LEVELS)}) raise exception.DracOperationError(error=reason) if raid_level_info['type'] == 'spanned': if spans_count <= 1: reason = _('Spanned RAID volumes cannot contain a single span') raise exception.DracOperationError(error=reason) span_type = raid_level_info['span_type'] raid_level_info = RAID_LEVELS[span_type] return raid_level_info['min_disks'] * spans_count def _raid_level_max_disks(raid_level, spans_count=1): try: raid_level_info = RAID_LEVELS[raid_level] except KeyError: reason = (_('RAID level %(raid_level)s is not supported by the ' 'driver. Supported RAID levels: %(supported_raid_levels)s') % {'raid_level': raid_level, 'supported_raid_levels': list(RAID_LEVELS)}) raise exception.DracOperationError(error=reason) if raid_level_info['type'] == 'spanned': if spans_count <= 1: reason = _('Spanned RAID volumes cannot contain a single span') raise exception.DracOperationError(error=reason) span_type = raid_level_info['span_type'] raid_level_info = RAID_LEVELS[span_type] return raid_level_info['max_disks'] * spans_count def _raid_level_overhead(raid_level, spans_count=1): try: raid_level_info = RAID_LEVELS[raid_level] except KeyError: reason = (_('RAID level %(raid_level)s is not supported by the ' 'driver. Supported RAID levels: %(supported_raid_levels)s') % {'raid_level': raid_level, 'supported_raid_levels': list(RAID_LEVELS)}) raise exception.DracOperationError(error=reason) if raid_level_info['type'] == 'spanned': if spans_count <= 1: reason = _('Spanned RAID volumes cannot contain a single span') raise exception.DracOperationError(error=reason) span_type = raid_level_info['span_type'] raid_level_info = RAID_LEVELS[span_type] return raid_level_info['overhead'] * spans_count def _max_volume_size_mb(raid_level, physical_disks, free_space_mb, spans_count=1, stripe_size_kb=64 * units.Ki): # restrict the size to the smallest available space free_spaces = [free_space_mb[disk] for disk in physical_disks] size_kb = min(free_spaces) * units.Ki # NOTE(ifarkas): using math.floor so we get a volume size that does not # exceed the available space stripes_per_disk = int(math.floor(float(size_kb) / stripe_size_kb)) disks_count = len(physical_disks) overhead_disks_count = _raid_level_overhead(raid_level, spans_count) return int(stripes_per_disk * stripe_size_kb * (disks_count - overhead_disks_count) / units.Ki) def _volume_usage_per_disk_mb(logical_disk, physical_disks, spans_count=1, stripe_size_kb=64 * units.Ki): disks_count = len(physical_disks) overhead_disks_count = _raid_level_overhead(logical_disk['raid_level'], spans_count) volume_size_kb = logical_disk['size_mb'] * units.Ki # NOTE(ifarkas): using math.ceil so we get the largest disk usage # possible, so we can avoid over-committing stripes_per_volume = math.ceil(float(volume_size_kb) / stripe_size_kb) stripes_per_disk = math.ceil( float(stripes_per_volume) / (disks_count - overhead_disks_count)) return int(stripes_per_disk * stripe_size_kb / units.Ki) def _find_configuration(logical_disks, physical_disks, pending_delete): """Find RAID configuration. 
This method transforms the RAID configuration defined in Ironic to a format that is required by dracclient. This includes matching the physical disks to RAID volumes when it's not pre-defined, or in general calculating missing properties. :param logical_disks: list of logical disk definitions. :param physical_disks: list of physical disk definitions. :param pending_delete: Whether there is a pending deletion of virtual disks that should be accounted for. """ # shared physical disks of RAID volumes size_gb='MAX' should be # deprioritized during the matching process to reserve as much space as # possible. Reserved means it won't be used during matching. volumes_with_reserved_physical_disks = [ volume for volume in logical_disks if ('physical_disks' in volume and volume['size_mb'] == 'MAX' and volume.get('share_physical_disks', False))] reserved_physical_disks = [ disk for disk in physical_disks for volume in volumes_with_reserved_physical_disks if disk.id in volume['physical_disks']] # we require each logical disk contain only homogeneous physical disks, so # sort them by type physical_disks_by_type = {} reserved_physical_disks_by_type = {} free_space_mb = {} for disk in physical_disks: # calculate free disk space free_space_mb[disk] = _get_disk_free_size_mb(disk, pending_delete) disk_type = (disk.controller, disk.media_type, disk.interface_type, disk.size_mb) if disk_type not in physical_disks_by_type: physical_disks_by_type[disk_type] = [] reserved_physical_disks_by_type[disk_type] = [] if disk in reserved_physical_disks: reserved_physical_disks_by_type[disk_type].append(disk) else: physical_disks_by_type[disk_type].append(disk) # exclude non-shared physical disks (predefined by the user) from # physical_disks_by_type because they are not going to be used during # matching for volume in logical_disks: if ('physical_disks' in volume and not volume.get('share_physical_disks', False)): for disk in physical_disks: if disk.id in volume['physical_disks']: disk_type = (disk.controller, disk.media_type, disk.interface_type, disk.size_mb) if disk in physical_disks_by_type[disk_type]: physical_disks_by_type[disk_type].remove(disk) processed_volumes = [] # step 1 - process volumes with predefined disks and exact size for volume in [volume for volume in logical_disks if ('physical_disks' in volume and volume['size_mb'] != 'MAX')]: _calculate_volume_props(volume, physical_disks, free_space_mb) processed_volumes.append(volume) # step 2 - process volumes without predefined disks volumes_without_disks = [disk for disk in logical_disks if 'physical_disks' not in disk] if volumes_without_disks: result, free_space_mb = ( _assign_disks_to_volume(volumes_without_disks, physical_disks_by_type, free_space_mb, pending_delete)) if not result: # try again using the reserved physical disks in addition for disk_type, disks in physical_disks_by_type.items(): physical_disks_by_type[disk_type] += ( reserved_physical_disks_by_type[disk_type]) result, free_space_mb = ( _assign_disks_to_volume(volumes_without_disks, physical_disks_by_type, free_space_mb, pending_delete)) if not result: error_msg = _('failed to find matching physical disks for all ' 'logical disks') LOG.error('DRAC driver failed to create RAID ' 'configuration. 
Reason: %(error)s.', {'error': error_msg}) raise exception.DracOperationError(error=error_msg) processed_volumes += volumes_without_disks # step 3 - process volumes with predefined disks and size_mb == 'MAX' for volume in [volume for volume in logical_disks if ('physical_disks' in volume and volume['size_mb'] == 'MAX')]: _calculate_volume_props(volume, physical_disks, free_space_mb) processed_volumes.append(volume) return processed_volumes def _calculate_volume_props(logical_disk, physical_disks, free_space_mb): selected_disks = [disk for disk in physical_disks if disk.id in logical_disk['physical_disks']] spans_count = _calculate_spans( logical_disk['raid_level'], len(selected_disks)) if len(selected_disks) % spans_count != 0: error_msg = _('invalid number of physical disks was provided') raise exception.DracOperationError(error=error_msg) disks_per_span = int(len(selected_disks) / spans_count) # Best practice is to not pass span_length and span_depth when creating a # RAID10. The iDRAC will dynamically calculate these values using maximum # values obtained from the RAID controller. logical_disk['span_depth'] = None logical_disk['span_length'] = None if logical_disk['raid_level'] != '1+0': logical_disk['span_depth'] = spans_count logical_disk['span_length'] = disks_per_span max_volume_size_mb = _max_volume_size_mb( logical_disk['raid_level'], selected_disks, free_space_mb, spans_count=spans_count) if logical_disk['size_mb'] == 'MAX': if max_volume_size_mb == 0: error_msg = _("size set to 'MAX' but could not allocate physical " "disk space") raise exception.DracOperationError(error=error_msg) logical_disk['size_mb'] = max_volume_size_mb elif max_volume_size_mb < logical_disk['size_mb']: if max_volume_size_mb == 0: error_msg = _('not enough physical disk space for the logical ' 'disk') raise exception.DracOperationError(error=error_msg) disk_usage = _volume_usage_per_disk_mb(logical_disk, selected_disks, spans_count=spans_count) for disk in selected_disks: if free_space_mb[disk] < disk_usage: error_msg = _('not enough free space on physical disks for the ' 'logical disk') raise exception.DracOperationError(error=error_msg) else: free_space_mb[disk] -= disk_usage if 'controller' not in logical_disk: logical_disk['controller'] = selected_disks[0].controller def _assign_disks_to_volume(logical_disks, physical_disks_by_type, free_space_mb, pending_delete): logical_disk = logical_disks.pop(0) raid_level = logical_disk['raid_level'] # iterate over all possible configurations for (controller, disk_type, interface_type, size_mb), disks in physical_disks_by_type.items(): if ('disk_type' in logical_disk and logical_disk['disk_type'] != disk_type): continue if ('interface_type' in logical_disk and logical_disk['interface_type'] != interface_type): continue # filter out disks without free disk space disks = [disk for disk in disks if free_space_mb[disk] > 0] # sort disks by free size which is important if we have max disks limit # on a volume disks = sorted( disks, key=lambda disk: free_space_mb[disk]) # filter out disks already in use if sharing is disabled if ('share_physical_disks' not in logical_disk or not logical_disk['share_physical_disks']): initial_free_size_mb = { disk: _get_disk_free_size_mb(disk, pending_delete) for disk in disks } disks = [disk for disk in disks if initial_free_size_mb[disk] == free_space_mb[disk]] max_spans = _calculate_spans(raid_level, len(disks)) min_spans = min([2, max_spans]) min_disks = _raid_level_min_disks(raid_level, spans_count=min_spans) max_disks = 
_raid_level_max_disks(raid_level, spans_count=max_spans) candidate_max_disks = min([max_disks, len(disks)]) for disks_count in range(min_disks, candidate_max_disks + 1): if ('number_of_physical_disks' in logical_disk and (logical_disk['number_of_physical_disks'] != disks_count)): continue # skip invalid disks_count if disks_count != _usable_disks_count(logical_disk['raid_level'], disks_count): continue selected_disks = disks[0:disks_count] candidate_volume = logical_disk.copy() candidate_free_space_mb = free_space_mb.copy() candidate_volume['physical_disks'] = [disk.id for disk in selected_disks] try: _calculate_volume_props(candidate_volume, selected_disks, candidate_free_space_mb) except exception.DracOperationError: continue if len(logical_disks) > 0: result, candidate_free_space_mb = ( _assign_disks_to_volume(logical_disks, physical_disks_by_type, candidate_free_space_mb, pending_delete)) if result: logical_disks.append(candidate_volume) return (True, candidate_free_space_mb) else: logical_disks.append(candidate_volume) return (True, candidate_free_space_mb) else: # put back the logical_disk to queue logical_disks.insert(0, logical_disk) return (False, free_space_mb) def _filter_logical_disks(logical_disks, include_root_volume, include_nonroot_volumes): filtered_disks = [] for disk in logical_disks: if include_root_volume and disk.get('is_root_volume'): filtered_disks.append(disk) if include_nonroot_volumes and not disk.get('is_root_volume'): filtered_disks.append(disk) return filtered_disks def _create_config_job(node, controller, reboot=False, realtime=False, raid_config_job_ids=[], raid_config_parameters=[]): job_id = commit_config(node, raid_controller=controller, reboot=reboot, realtime=realtime) raid_config_job_ids.append(job_id) if controller not in raid_config_parameters: raid_config_parameters.append(controller) LOG.info('Change has been committed to RAID controller ' '%(controller)s on node %(node)s. ' 'DRAC job id: %(job_id)s', {'controller': controller, 'node': node.uuid, 'job_id': job_id}) return {'raid_config_job_ids': raid_config_job_ids, 'raid_config_parameters': raid_config_parameters} def _validate_volume_size(node, logical_disks): new_physical_disks = list_physical_disks(node) free_space_mb = {} new_processed_volumes = [] for disk in new_physical_disks: free_space_mb[disk] = disk.free_size_mb for logical_disk in logical_disks: selected_disks = [disk for disk in new_physical_disks if disk.id in logical_disk['physical_disks']] spans_count = _calculate_spans( logical_disk['raid_level'], len(selected_disks)) new_max_vol_size_mb = _max_volume_size_mb( logical_disk['raid_level'], selected_disks, free_space_mb, spans_count=spans_count) if logical_disk['size_mb'] > new_max_vol_size_mb: logical_disk['size_mb'] = new_max_vol_size_mb LOG.info("Logical size does not match so calculating volume " "properties for current logical_disk") _calculate_volume_props( logical_disk, new_physical_disks, free_space_mb) new_processed_volumes.append(logical_disk) if new_processed_volumes: return new_processed_volumes return logical_disks def _switch_to_raid_mode(node, controller_fqdd): """Convert the controller mode from Enhanced HBA to RAID mode :param node: an ironic node object :param controller_fqdd: the ID of the RAID controller. :returns: a dictionary containing - The raid_controller key with a ID of the RAID controller value. - The is_commit_required needed key with a boolean value indicating whether a config job must be created for the values to be applied. 
- The is_reboot_required key with a RebootRequired enumerated value indicating whether the server must be rebooted to switch the controller mode to RAID. """ # wait for pending jobs to complete drac_job.wait_for_job_completion(node) raid_attr = "{}:{}".format(controller_fqdd, _REQUESTED_RAID_CONTROLLER_MODE) settings = {raid_attr: _RAID_MODE} settings_results = set_raid_settings( node, controller_fqdd, settings) controller = { 'raid_controller': controller_fqdd, 'is_reboot_required': settings_results['is_reboot_required'], 'is_commit_required': settings_results['is_commit_required']} return controller def _commit_to_controllers(node, controllers, substep="completed"): """Commit changes to RAID controllers on the node. :param node: an ironic node object :param controllers: a list of dictionary containing - The raid_controller key with raid controller fqdd value indicating on which raid configuration job needs to be perform. - The is_commit_required needed key with a boolean value indicating whether a config job must be created. - The is_reboot_required key with a RebootRequired enumerated value indicating whether the server must be rebooted only if raid controller does not support realtime. :param substep: contain sub cleaning or deploy step which executes any raid configuration job if set after cleaning or deploy step. (default to completed) :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment) if configuration is in progress asynchronously or None if it is completed. """ # remove controller which does not require configuration job controllers = [controller for controller in controllers if controller['is_commit_required']] if not controllers: LOG.debug('No changes on any of the controllers on node %s', node.uuid) driver_internal_info = node.driver_internal_info driver_internal_info['raid_config_substep'] = substep driver_internal_info['raid_config_parameters'] = [] node.driver_internal_info = driver_internal_info node.save() return driver_internal_info = node.driver_internal_info driver_internal_info['raid_config_substep'] = substep driver_internal_info['raid_config_parameters'] = [] if 'raid_config_job_ids' not in driver_internal_info: driver_internal_info['raid_config_job_ids'] = [] optional = drac_constants.RebootRequired.optional # all realtime controllers all_realtime = all( (cntlr['is_reboot_required'] == optional) and not(cntlr.get('is_ehba_mode')) for cntlr in controllers) # check any controller with ehba mode any_ehba_controllers = any( cntrl.get('is_ehba_mode') is True for cntrl in controllers) raid_config_job_ids = [] raid_config_parameters = [] if all_realtime: for controller in controllers: realtime_controller = controller['raid_controller'] job_details = _create_config_job( node, controller=realtime_controller, reboot=False, realtime=True, raid_config_job_ids=raid_config_job_ids, raid_config_parameters=raid_config_parameters) elif any_ehba_controllers: commit_to_ehba_controllers = [] for controller in controllers: if controller.get('is_ehba_mode'): job_details = _create_config_job( node, controller=controller['raid_controller'], reboot=False, realtime=True, raid_config_job_ids=raid_config_job_ids, raid_config_parameters=raid_config_parameters) ehba_controller = _switch_to_raid_mode( node, controller['raid_controller']) commit_to_ehba_controllers.append( ehba_controller['raid_controller']) else: job_details = _create_config_job( node, controller=controller['raid_controller'], reboot=False, realtime=False, raid_config_job_ids=raid_config_job_ids, 
raid_config_parameters=raid_config_parameters) for controller in commit_to_ehba_controllers: LOG.debug("Create job with Reboot to apply configuration " "changes for ehba controllers") job_details = _create_config_job( node, controller=controller, reboot=(controller == commit_to_ehba_controllers[-1]), realtime=False, raid_config_job_ids=raid_config_job_ids, raid_config_parameters=raid_config_parameters) else: for controller in controllers: mix_controller = controller['raid_controller'] reboot = (controller == controllers[-1]) job_details = _create_config_job( node, controller=mix_controller, reboot=reboot, realtime=False, raid_config_job_ids=raid_config_job_ids, raid_config_parameters=raid_config_parameters) driver_internal_info['raid_config_job_ids'].extend(job_details[ 'raid_config_job_ids']) driver_internal_info['raid_config_parameters'].extend(job_details[ 'raid_config_parameters']) node.driver_internal_info = driver_internal_info # Signal whether the node has been rebooted, that we do not need to execute # the step again, and that this completion of this step is triggered # through async polling. # NOTE(mgoddard): set_async_step_flags calls node.save(). deploy_utils.set_async_step_flags( node, reboot=not all_realtime, skip_current_step=True, polling=True) return deploy_utils.get_async_step_return_state(node) def _create_virtual_disks(task, node): logical_disks_to_create = node.driver_internal_info[ 'logical_disks_to_create'] # Check valid properties attached to voiume after drives conversion isVolValidationNeeded = node.driver_internal_info[ 'volume_validation'] if isVolValidationNeeded: logical_disks_to_create = _validate_volume_size( node, logical_disks_to_create) controllers = list() for logical_disk in logical_disks_to_create: controller = dict() controller_cap = create_virtual_disk( node, raid_controller=logical_disk['controller'], physical_disks=logical_disk['physical_disks'], raid_level=logical_disk['raid_level'], size_mb=logical_disk['size_mb'], disk_name=logical_disk.get('name'), span_length=logical_disk.get('span_length'), span_depth=logical_disk.get('span_depth')) controller['raid_controller'] = logical_disk['controller'] controller['is_reboot_required'] = controller_cap[ 'is_reboot_required'] controller['is_commit_required'] = controller_cap[ 'is_commit_required'] if controller not in controllers: controllers.append(controller) return _commit_to_controllers(node, controllers) def _controller_in_hba_mode(raid_settings, controller_fqdd): controller_mode = raid_settings.get( '{}:{}'.format(controller_fqdd, _CURRENT_RAID_CONTROLLER_MODE)) return _EHBA_MODE in controller_mode.current_value def _controller_supports_ehba_mode(settings, controller_fqdd): raid_cntrl_attr = "{}:{}".format(controller_fqdd, _CURRENT_RAID_CONTROLLER_MODE) current_cntrl_mode = settings.get(raid_cntrl_attr) if not current_cntrl_mode: return False else: return _EHBA_MODE in current_cntrl_mode.possible_values def _get_disk_free_size_mb(disk, pending_delete): """Return the size of free space on the disk in MB. :param disk: a PhysicalDisk object. :param pending_delete: Whether there is a pending deletion of all virtual disks. 
""" return disk.size_mb if pending_delete else disk.free_size_mb class DracWSManRAID(base.RAIDInterface): def get_properties(self): """Return the properties of the interface.""" return drac_common.COMMON_PROPERTIES @base.deploy_step(priority=0, argsinfo=base.RAID_APPLY_CONFIGURATION_ARGSINFO) def apply_configuration(self, task, raid_config, create_root_volume=True, create_nonroot_volumes=False, delete_existing=True): return super(DracWSManRAID, self).apply_configuration( task, raid_config, create_root_volume=create_root_volume, create_nonroot_volumes=create_nonroot_volumes, delete_existing=delete_existing) @METRICS.timer('DracRAID.create_configuration') @base.clean_step(priority=0, abortable=False, argsinfo={ 'create_root_volume': { 'description': ( 'This specifies whether to create the root volume. ' 'Defaults to `True`.' ), 'required': False }, 'create_nonroot_volumes': { 'description': ( 'This specifies whether to create the non-root volumes. ' 'Defaults to `True`.' ), 'required': False }, "delete_existing": { "description": ( "Setting this to 'True' indicates to delete existing RAID " "configuration prior to creating the new configuration. " "Default value is 'False'." ), "required": False, } }) def create_configuration(self, task, create_root_volume=True, create_nonroot_volumes=True, delete_existing=False): """Create the RAID configuration. This method creates the RAID configuration on the given node. :param task: a TaskManager instance containing the node to act on. :param create_root_volume: If True, a root volume is created during RAID configuration. Otherwise, no root volume is created. Default is True. :param create_nonroot_volumes: If True, non-root volumes are created. If False, no non-root volumes are created. Default is True. :param delete_existing: Setting this to True indicates to delete RAID configuration prior to creating the new configuration. Default is False. :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment) if creation is in progress asynchronously or None if it is completed. :raises: MissingParameterValue, if node.target_raid_config is missing or empty. :raises: DracOperationError on an error from python-dracclient. """ node = task.node logical_disks = node.target_raid_config['logical_disks'] for disk in logical_disks: if disk['size_gb'] == 'MAX' and 'physical_disks' not in disk: raise exception.InvalidParameterValue( _("create_configuration called with invalid " "target_raid_configuration for node %(node_id)s. " "'physical_disks' is missing from logical_disk while " "'size_gb'='MAX' was requested: " "%(logical_disk)s") % {'node_id': node.uuid, 'logical_disk': disk}) if disk['size_gb'] == 'MAX': disk['size_mb'] = 'MAX' else: disk['size_mb'] = disk['size_gb'] * units.Ki del disk['size_gb'] if delete_existing: self._delete_configuration_no_commit(task) physical_disks = list_physical_disks(node) logical_disks = _find_configuration(logical_disks, physical_disks, pending_delete=delete_existing) logical_disks_to_create = _filter_logical_disks( logical_disks, create_root_volume, create_nonroot_volumes) controllers_to_physical_disk_ids = defaultdict(list) for logical_disk in logical_disks_to_create: # Not applicable to JBOD logical disks. 
if logical_disk['raid_level'] == 'JBOD': continue for physical_disk_name in logical_disk['physical_disks']: controllers_to_physical_disk_ids[ logical_disk['controller']].append( physical_disk_name) # adding logical_disks to driver_internal_info to create virtual disks driver_internal_info = node.driver_internal_info driver_internal_info[ "logical_disks_to_create"] = logical_disks_to_create commit_results = None if logical_disks_to_create: LOG.debug( "Converting physical disks configured to back RAID " "logical disks to RAID mode for node %(node_uuid)s ", {"node_uuid": node.uuid}) raid_mode = drac_constants.RaidStatus.raid commit_results = _change_physical_disk_mode( node, raid_mode, controllers_to_physical_disk_ids, substep="create_virtual_disks") volume_validation = True if commit_results else False driver_internal_info['volume_validation'] = volume_validation node.driver_internal_info = driver_internal_info node.save() if commit_results: return commit_results else: LOG.debug("Controller does not support drives conversion " "so creating virtual disks") return _create_virtual_disks(task, node) @METRICS.timer('DracRAID.delete_configuration') @base.clean_step(priority=0) @base.deploy_step(priority=0) def delete_configuration(self, task): """Delete the RAID configuration. :param task: a TaskManager instance containing the node to act on. :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment) if deletion is in progress asynchronously or None if it is completed. :raises: DracOperationError on an error from python-dracclient. """ controllers = self._delete_configuration_no_commit(task) return _commit_to_controllers(task.node, controllers, substep="delete_foreign_config") @METRICS.timer('DracRAID.get_logical_disks') def get_logical_disks(self, task): """Get the RAID configuration of the node. :param task: a TaskManager instance containing the node to act on. :returns: A dictionary of properties. :raises: DracOperationError on an error from python-dracclient. """ node = task.node logical_disks = [] for disk in list_virtual_disks(node): logical_disk = { 'id': disk.id, 'controller': disk.controller, 'size_gb': int(disk.size_mb / units.Ki), 'raid_level': disk.raid_level } if disk.name is not None: logical_disk['name'] = disk.name logical_disks.append(logical_disk) return {'logical_disks': logical_disks} @METRICS.timer('DracRAID._query_raid_config_job_status') @periodics.periodic( spacing=CONF.drac.query_raid_config_job_status_interval) def _query_raid_config_job_status(self, manager, context): """Periodic task to check the progress of running RAID config jobs.""" filters = {'reserved': False, 'maintenance': False}
        fields = ['driver_internal_info']
        node_list = manager.iter_nodes(fields=fields, filters=filters)
for (node_uuid, driver, conductor_group, driver_internal_info) in node_list: try: lock_purpose = 'checking async raid configuration jobs' with task_manager.acquire(context, node_uuid, purpose=lock_purpose, shared=True) as task: if not isinstance(task.driver.raid, DracWSManRAID): continue job_ids = driver_internal_info.get('raid_config_job_ids') if not job_ids: continue self._check_node_raid_jobs(task) except exception.NodeNotFound: LOG.info("During query_raid_config_job_status, node " "%(node)s was not found and presumed deleted by " "another process.", {'node': node_uuid}) except exception.NodeLocked: LOG.info("During query_raid_config_job_status, node " "%(node)s was already locked by another process. " "Skip.", {'node': node_uuid}) @METRICS.timer('DracRAID._check_node_raid_jobs') def _check_node_raid_jobs(self, task): """Check the progress of running RAID config jobs of a node.""" node = task.node raid_config_job_ids = node.driver_internal_info['raid_config_job_ids'] finished_job_ids = [] for config_job_id in raid_config_job_ids: config_job = drac_job.get_job(node, job_id=config_job_id) if config_job is None or config_job.status == 'Completed': finished_job_ids.append(config_job_id) elif config_job.status == 'Failed': finished_job_ids.append(config_job_id) self._set_raid_config_job_failure(node) if not finished_job_ids: return task.upgrade_lock() self._delete_cached_config_job_id(node, finished_job_ids) if not node.driver_internal_info.get('raid_config_job_failure', False): if 'raid_config_substep' in node.driver_internal_info: substep = node.driver_internal_info['raid_config_substep'] if substep == 'delete_foreign_config': foreign_drives = self._execute_foreign_drives(task, node) if foreign_drives is None: return self._convert_drives(task, node) elif substep == 'physical_disk_conversion': self._convert_drives(task, node) elif substep == "create_virtual_disks": return _create_virtual_disks(task, node) elif substep == 'completed': self._complete_raid_substep(task, node) else: self._complete_raid_substep(task, node) else: self._clear_raid_substep(node) self._clear_raid_config_job_failure(node) self._set_failed(task, config_job) def _execute_foreign_drives(self, task, node): controllers = list() jobs_required = False for controller_id in node.driver_internal_info[ 'raid_config_parameters']: controller_cap = clear_foreign_config( node, controller_id) controller = { 'raid_controller': controller_id, 'is_reboot_required': controller_cap['is_reboot_required'], 'is_commit_required': controller_cap['is_commit_required']} controllers.append(controller) jobs_required = jobs_required or controller_cap[ 'is_commit_required'] if not jobs_required: LOG.info( "No foreign drives detected, so " "resume %s", "cleaning" if node.clean_step else "deployment") return None else: return _commit_to_controllers( node, controllers, substep='physical_disk_conversion') def _complete_raid_substep(self, task, node): self._clear_raid_substep(node) self._resume(task) def _convert_drives(self, task, node): jbod = drac_constants.RaidStatus.jbod drives_results = _change_physical_disk_mode( node, mode=jbod) if drives_results is None: LOG.debug("Controller does not support drives " "conversion on %(node_uuid)s", {'node_uuid': node.uuid}) self._complete_raid_substep(task, node) def _clear_raid_substep(self, node): driver_internal_info = node.driver_internal_info driver_internal_info.pop('raid_config_substep', None) driver_internal_info.pop('raid_config_parameters', None) node.driver_internal_info = driver_internal_info 
node.save() def _set_raid_config_job_failure(self, node): driver_internal_info = node.driver_internal_info driver_internal_info['raid_config_job_failure'] = True node.driver_internal_info = driver_internal_info node.save() def _clear_raid_config_job_failure(self, node): driver_internal_info = node.driver_internal_info del driver_internal_info['raid_config_job_failure'] node.driver_internal_info = driver_internal_info node.save() def _delete_cached_config_job_id(self, node, finished_config_job_ids=None): if finished_config_job_ids is None: finished_config_job_ids = [] driver_internal_info = node.driver_internal_info unfinished_job_ids = [job_id for job_id in driver_internal_info['raid_config_job_ids'] if job_id not in finished_config_job_ids] driver_internal_info['raid_config_job_ids'] = unfinished_job_ids node.driver_internal_info = driver_internal_info node.save() def _set_failed(self, task, config_job): error_msg = (_("Failed config job: %(config_job_id)s. " "Message: '%(message)s'.") % {'config_job_id': config_job.id, 'message': config_job.message}) log_msg = ("RAID configuration job failed for node %(node)s. " "%(error)s" % {'node': task.node.uuid, 'error': error_msg}) if task.node.clean_step: manager_utils.cleaning_error_handler(task, error_msg) else: manager_utils.deploying_error_handler(task, log_msg, error_msg) def _resume(self, task): raid_common.update_raid_info( task.node, self.get_logical_disks(task)) if task.node.clean_step: manager_utils.notify_conductor_resume_clean(task) else: manager_utils.notify_conductor_resume_deploy(task) def _delete_configuration_no_commit(self, task): """Delete existing RAID configuration without committing the change. :param task: A TaskManager instance. :returns: A set of names of RAID controllers which need RAID changes to be committed. """ node = task.node controllers = list() drac_raid_controllers = list_raid_controllers(node) drac_raid_settings = list_raid_settings(node) for cntrl in drac_raid_controllers: if _is_raid_controller(node, cntrl.id, drac_raid_controllers): controller = dict() if _controller_supports_ehba_mode( drac_raid_settings, cntrl.id) and _controller_in_hba_mode( drac_raid_settings, cntrl.id): controller['is_ehba_mode'] = True controller_cap = _reset_raid_config(node, cntrl.id) controller["raid_controller"] = cntrl.id controller["is_reboot_required"] = controller_cap[ "is_reboot_required"] controller["is_commit_required"] = controller_cap[ "is_commit_required"] controllers.append(controller) return controllers class DracRAID(DracWSManRAID): """Class alias of class DracWSManRAID. This class provides ongoing support of the deprecated 'idrac' RAID interface implementation entrypoint. All bug fixes and new features should be implemented in its base class, DracWSManRAID. That makes them available to both the deprecated 'idrac' and new 'idrac-wsman' entrypoints. Such changes should not be made to this class. """ def __init__(self): super(DracRAID, self).__init__() LOG.warning("RAID interface 'idrac' is deprecated and may be removed " "in a future release. Use 'idrac-wsman' instead.")
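
# Illustrative sketch only (not used by the driver): an example of the
# ``node.target_raid_config`` that ``DracWSManRAID.create_configuration``
# above consumes. The controller and disk FQDD values are assumptions for
# demonstration, not output from a real iDRAC.
#
#   {
#       "logical_disks": [
#           {"size_gb": 50, "raid_level": "1", "is_root_volume": True},
#           {"size_gb": "MAX", "raid_level": "5",
#            "controller": "RAID.Integrated.1-1",
#            "physical_disks": [
#                "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1",
#                "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1",
#                "Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1"]},
#       ]
#   }
#
# 'size_gb' may be the literal string 'MAX', in which case 'physical_disks'
# must be listed explicitly (see the validation in create_configuration).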
controller_workflow_integration_test.go
// +build integration package cmd_test import ( "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "testing" v1 "github.com/jenkins-x/jx/pkg/apis/jenkins.io/v1" "github.com/jenkins-x/jx/pkg/gits" "github.com/jenkins-x/jx/pkg/helm" "github.com/jenkins-x/jx/pkg/jx/cmd" "github.com/jenkins-x/jx/pkg/jx/cmd/opts" "github.com/jenkins-x/jx/pkg/kube" resources_test "github.com/jenkins-x/jx/pkg/kube/resources/mocks" "github.com/jenkins-x/jx/pkg/log" "github.com/jenkins-x/jx/pkg/workflow" "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/runtime" ) func TestSequentialWorkflow(t *testing.T) { testOrgName := "jstrachan" testRepoName := "myrepo" stagingRepoName := "environment-staging" prodRepoName := "environment-production" fakeRepo := gits.NewFakeRepository(testOrgName, testRepoName) stagingRepo := gits.NewFakeRepository(testOrgName, stagingRepoName) prodRepo := gits.NewFakeRepository(testOrgName, prodRepoName) fakeGitProvider := gits.NewFakeProvider(fakeRepo, stagingRepo, prodRepo) fakeGitProvider.User.Username = testOrgName staging := kube.NewPermanentEnvironmentWithGit("staging", "https://fake.git/"+testOrgName+"/"+stagingRepoName+".git") production := kube.NewPermanentEnvironmentWithGit("production", "https://fake.git/"+testOrgName+"/"+prodRepoName+".git") staging.Spec.Order = 100 production.Spec.Order = 200 configureGitFn := func(dir string, gitInfo *gits.GitRepository, gitter gits.Gitter) error { err := gitter.Init(dir) if err != nil { return err } // Really we should have a dummy environment chart but for now let's just mock it out as needed err = os.MkdirAll(filepath.Join(dir, "templates"), 0700) if err != nil { return err } data, err := json.Marshal(staging) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(dir, "templates", "environment-staging.yaml"), data, 0755) if err != nil { return err } data, err = json.Marshal(production) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(dir, "templates", "environment-production.yaml"), data, 0755) if err != nil { return err } return gitter.AddCommit(dir, "Initial Commit") } o := &cmd.ControllerWorkflowOptions{ ControllerOptions: cmd.ControllerOptions{ CommonOptions: &opts.CommonOptions{}, }, NoWatch: true, Namespace: "jx", ConfigureGitFn: configureGitFn, } myFlowName := "myflow" step1 := workflow.CreateWorkflowPromoteStep("staging") step2 := workflow.CreateWorkflowPromoteStep("production", step1) cmd.ConfigureTestOptionsWithResources(o.CommonOptions, []runtime.Object{}, []runtime.Object{ staging, production, kube.NewPreviewEnvironment("jx-jstrachan-demo96-pr-1"), kube.NewPreviewEnvironment("jx-jstrachan-another-pr-3"), workflow.CreateWorkflow("jx", myFlowName, step1, step2, ), }, gits.NewGitLocal(), fakeGitProvider, helm.NewHelmCLI("helm", helm.V2, "", true), resources_test.NewMockInstaller(), ) err := cmd.CreateTestEnvironmentDir(o.CommonOptions) assert.NoError(t, err) jxClient, ns, err := o.JXClientAndDevNamespace() assert.NoError(t, err) if err == nil { workflow, err := workflow.GetWorkflow("", jxClient, ns) assert.NoError(t, err) if err == nil { assert.Equal(t, "default", workflow.Name, "name") spec := workflow.Spec assert.Equal(t, 2, len(spec.Steps), "number of steps") if len(spec.Steps) > 0 { cmd.AssertPromoteStep(t, &spec.Steps[0], "staging") } if len(spec.Steps) > 1 { cmd.AssertPromoteStep(t, &spec.Steps[1], "production") } } } a, err := cmd.CreateTestPipelineActivity(jxClient, ns, testOrgName, testRepoName, "master", "1", myFlowName) assert.NoError(t, err) if err != nil { return } err = 
o.Run() assert.NoError(t, err) if err != nil { return } activities := jxClient.JenkinsV1().PipelineActivities(ns) cmd.AssertHasPullRequestForEnv(t, activities, a.Name, "staging") cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeRunning) // lets make sure we don't create a PR for production as we have not completed the staging PR yet err = o.Run() cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, "production") // still no PR merged so cannot create a PR for production cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, "production") // test no PR on production until staging completed if !cmd.AssertSetPullRequestMerged(t, fakeGitProvider, stagingRepo.Owner, stagingRepo.GitRepo.Name, 1) { return } cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, "production") if !cmd.AssertSetPullRequestComplete(t, fakeGitProvider, stagingRepo, 1) { return } // now lets poll again due to change to the activity to detect the staging is complete cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertHasPullRequestForEnv(t, activities, a.Name, "production") cmd.AssertHasPromoteStatus(t, activities, a.Name, "production", v1.ActivityStatusTypeRunning) cmd.AssertHasPipelineStatus(t, activities, a.Name, v1.ActivityStatusTypeRunning) if !cmd.AssertSetPullRequestMerged(t, fakeGitProvider, prodRepo.Owner, prodRepo.GitRepo.Name, 1) { return } if !cmd.AssertSetPullRequestComplete(t, fakeGitProvider, prodRepo, 1) { return } cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, "production", v1.ActivityStatusTypeSucceeded) cmd.AssertAllPromoteStepsSuccessful(t, activities, a.Name) } func TestWorkflowManualPromote(t *testing.T)
// TestParallelWorkflow lets test promoting to A + B then when A + B is complete then C func TestParallelWorkflow(t *testing.T) { testOrgName := "jstrachan" testRepoName := "parallelrepo" envNameA := "a" envNameB := "b" envNameC := "c" envRepoNameA := "environment-" + envNameA envRepoNameB := "environment-" + envNameB envRepoNameC := "environment-" + envNameC fakeRepo := gits.NewFakeRepository(testOrgName, testRepoName) repoA := gits.NewFakeRepository(testOrgName, envRepoNameA) repoB := gits.NewFakeRepository(testOrgName, envRepoNameB) repoC := gits.NewFakeRepository(testOrgName, envRepoNameC) fakeGitProvider := gits.NewFakeProvider(fakeRepo, repoA, repoB, repoC) envA := kube.NewPermanentEnvironmentWithGit(envNameA, "https://fake.git/"+testOrgName+"/"+envRepoNameA+".git") envB := kube.NewPermanentEnvironmentWithGit(envNameB, "https://fake.git/"+testOrgName+"/"+envRepoNameB+".git") envC := kube.NewPermanentEnvironmentWithGit(envNameC, "https://fake.git/"+testOrgName+"/"+envRepoNameC+".git") configureGitFn := func(dir string, gitInfo *gits.GitRepository, gitter gits.Gitter) error { err := gitter.Init(dir) if err != nil { return err } // Really we should have a dummy environment chart but for now let's just mock it out as needed err = os.MkdirAll(filepath.Join(dir, "templates"), 0700) if err != nil { return err } data, err := json.Marshal(envA) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(dir, "templates", fmt.Sprintf("%s.yaml", envRepoNameA)), data, 0755) if err != nil { return err } data, err = json.Marshal(envB) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(dir, "templates", fmt.Sprintf("%s.yaml", envRepoNameB)), data, 0755) if err != nil { return err } data, err = json.Marshal(envC) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(dir, "templates", fmt.Sprintf("%s.yaml", envRepoNameC)), data, 0755) if err != nil { return err } return gitter.AddCommit(dir, "Initial Commit") } o := &cmd.ControllerWorkflowOptions{ ControllerOptions: cmd.ControllerOptions{ CommonOptions: &opts.CommonOptions{}, }, NoWatch: true, Namespace: "jx", ConfigureGitFn: configureGitFn, } myFlowName := "myflow" step1 := workflow.CreateWorkflowPromoteStep(envNameA) step2 := workflow.CreateWorkflowPromoteStep(envNameB) step3 := workflow.CreateWorkflowPromoteStep(envNameC, step1, step2) cmd.ConfigureTestOptionsWithResources(o.CommonOptions, []runtime.Object{}, []runtime.Object{ envA, envB, envC, kube.NewPreviewEnvironment("jx-jstrachan-demo96-pr-1"), kube.NewPreviewEnvironment("jx-jstrachan-another-pr-3"), workflow.CreateWorkflow("jx", myFlowName, step1, step2, step3, ), }, gits.NewGitLocal(), fakeGitProvider, helm.NewHelmCLI("helm", helm.V2, "", true), resources_test.NewMockInstaller(), ) err := cmd.CreateTestEnvironmentDir(o.CommonOptions) assert.NoError(t, err) jxClient, ns, err := o.JXClientAndDevNamespace() assert.NoError(t, err) if err == nil { workflow, err := workflow.GetWorkflow("", jxClient, ns) assert.NoError(t, err) if err == nil { assert.Equal(t, "default", workflow.Name, "name") spec := workflow.Spec assert.Equal(t, 3, len(spec.Steps), "number of steps") if len(spec.Steps) > 0 { cmd.AssertPromoteStep(t, &spec.Steps[0], envNameA) } if len(spec.Steps) > 1 { cmd.AssertPromoteStep(t, &spec.Steps[1], envNameB) } if len(spec.Steps) > 2 { cmd.AssertPromoteStep(t, &spec.Steps[2], envNameC) } } } a, err := cmd.CreateTestPipelineActivity(jxClient, ns, testOrgName, testRepoName, "master", "1", myFlowName) assert.NoError(t, err) if err != nil { return } err = 
o.Run() assert.NoError(t, err) if err != nil { return } activities := jxClient.JenkinsV1().PipelineActivities(ns) cmd.AssertHasPullRequestForEnv(t, activities, a.Name, envNameA) cmd.AssertHasPullRequestForEnv(t, activities, a.Name, envNameB) cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeRunning) // lets make sure we don't create a PR for production as we have not completed the staging PR yet err = o.Run() cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, envNameC) // still no PR merged so cannot create a PR for C until A and B complete cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, envNameC) // test no PR on production until staging completed if !cmd.AssertSetPullRequestMerged(t, fakeGitProvider, repoA.Owner, repoA.GitRepo.Name, 1) { return } cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, envNameC) if !cmd.AssertSetPullRequestComplete(t, fakeGitProvider, repoA, 1) { return } // now lets poll again due to change to the activity to detect the staging is complete cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, envNameC) cmd.AssertHasPromoteStatus(t, activities, a.Name, envNameA, v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, envNameB, v1.ActivityStatusTypeRunning) cmd.AssertHasPipelineStatus(t, activities, a.Name, v1.ActivityStatusTypeRunning) if !cmd.AssertSetPullRequestMerged(t, fakeGitProvider, repoB.Owner, repoB.GitRepo.Name, 1) { return } if !cmd.AssertSetPullRequestComplete(t, fakeGitProvider, repoB, 1) { return } cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) // C should have started now cmd.AssertHasPullRequestForEnv(t, activities, a.Name, envNameC) cmd.AssertHasPromoteStatus(t, activities, a.Name, envNameA, v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, envNameB, v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, envNameC, v1.ActivityStatusTypeRunning) if !cmd.AssertSetPullRequestMerged(t, fakeGitProvider, repoC.Owner, repoC.GitRepo.Name, 1) { return } if !cmd.AssertSetPullRequestComplete(t, fakeGitProvider, repoC, 1) { return } // should be complete now cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPromoteStatus(t, activities, a.Name, envNameA, v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, envNameB, v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, envNameC, v1.ActivityStatusTypeSucceeded) cmd.AssertAllPromoteStepsSuccessful(t, activities, a.Name) } // TestNewVersionWhileExistingWorkflow lets test that we create a new workflow and terminate // the old workflow if we find a new version func TestNewVersionWhileExistingWorkflow(t *testing.T) { testOrgName := "jstrachan" testRepoName := "myrepo" stagingRepoName := "environment-staging" prodRepoName := "environment-production" fakeRepo := gits.NewFakeRepository(testOrgName, testRepoName) stagingRepo := gits.NewFakeRepository(testOrgName, stagingRepoName) prodRepo := gits.NewFakeRepository(testOrgName, prodRepoName) fakeGitProvider := gits.NewFakeProvider(fakeRepo, stagingRepo, prodRepo) staging := kube.NewPermanentEnvironmentWithGit("staging", "https://fake.git/"+testOrgName+"/"+stagingRepoName+".git") production := kube.NewPermanentEnvironmentWithGit("production", 
"https://fake.git/"+testOrgName+"/"+prodRepoName+".git") staging.Spec.Order = 100 production.Spec.Order = 200 configureGitFn := func(dir string, gitInfo *gits.GitRepository, gitter gits.Gitter) error { err := gitter.Init(dir) if err != nil { return err } // Really we should have a dummy environment chart but for now let's just mock it out as needed err = os.MkdirAll(filepath.Join(dir, "templates"), 0700) if err != nil { return err } data, err := json.Marshal(staging) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(dir, "templates", "environment-staging.yaml"), data, 0755) if err != nil { return err } data, err = json.Marshal(production) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(dir, "templates", "environment-production.yaml"), data, 0755) if err != nil { return err } return gitter.AddCommit(dir, "Initial Commit") } o := &cmd.ControllerWorkflowOptions{ ControllerOptions: cmd.ControllerOptions{ CommonOptions: &opts.CommonOptions{}, }, NoWatch: true, Namespace: "jx", ConfigureGitFn: configureGitFn, } myFlowName := "myflow" step1 := workflow.CreateWorkflowPromoteStep("staging") step2 := workflow.CreateWorkflowPromoteStep("production", step1) cmd.ConfigureTestOptionsWithResources(o.CommonOptions, []runtime.Object{}, []runtime.Object{ staging, production, kube.NewPreviewEnvironment("jx-jstrachan-demo96-pr-1"), kube.NewPreviewEnvironment("jx-jstrachan-another-pr-3"), workflow.CreateWorkflow("jx", myFlowName, step1, step2, ), }, gits.NewGitLocal(), fakeGitProvider, helm.NewHelmCLI("helm", helm.V2, "", true), resources_test.NewMockInstaller(), ) err := cmd.CreateTestEnvironmentDir(o.CommonOptions) assert.NoError(t, err) jxClient, ns, err := o.JXClientAndDevNamespace() assert.NoError(t, err) if err == nil { workflow, err := workflow.GetWorkflow("", jxClient, ns) assert.NoError(t, err) if err == nil { assert.Equal(t, "default", workflow.Name, "name") spec := workflow.Spec assert.Equal(t, 2, len(spec.Steps), "number of steps") if len(spec.Steps) > 0 { cmd.AssertPromoteStep(t, &spec.Steps[0], "staging") } if len(spec.Steps) > 1 { cmd.AssertPromoteStep(t, &spec.Steps[1], "production") } } } a, err := cmd.CreateTestPipelineActivity(jxClient, ns, testOrgName, testRepoName, "master", "1", myFlowName) assert.NoError(t, err) if err != nil { return } err = o.Run() assert.NoError(t, err) if err != nil { return } activities := jxClient.JenkinsV1().PipelineActivities(ns) cmd.AssertHasPullRequestForEnv(t, activities, a.Name, "staging") cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeRunning) // lets trigger a new pipeline release which should close the old version aOld := a a, err = cmd.CreateTestPipelineActivity(jxClient, ns, testOrgName, testRepoName, "master", "2", myFlowName) cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPullRequestForEnv(t, activities, a.Name, "staging") cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeRunning) // lets make sure we don't create a PR for production as we have not completed the staging PR yet cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, "production") cmd.AssertWorkflowStatus(t, activities, aOld.Name, v1.ActivityStatusTypeAborted) // still no PR merged so cannot create a PR for production cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, "production") // test no PR on production until staging completed if 
!cmd.AssertSetPullRequestMerged(t, fakeGitProvider, stagingRepo.Owner, stagingRepo.GitRepo.Name, 2) { return } cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, "production") if !cmd.AssertSetPullRequestComplete(t, fakeGitProvider, stagingRepo, 2) { return } // now lets poll again due to change to the activity to detect the staging is complete cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertHasPullRequestForEnv(t, activities, a.Name, "production") cmd.AssertHasPromoteStatus(t, activities, a.Name, "production", v1.ActivityStatusTypeRunning) cmd.AssertHasPipelineStatus(t, activities, a.Name, v1.ActivityStatusTypeRunning) if !cmd.AssertSetPullRequestMerged(t, fakeGitProvider, prodRepo.Owner, prodRepo.GitRepo.Name, 1) { return } if !cmd.AssertSetPullRequestComplete(t, fakeGitProvider, prodRepo, 1) { return } cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, "production", v1.ActivityStatusTypeSucceeded) cmd.AssertAllPromoteStepsSuccessful(t, activities, a.Name) } func TestPullRequestNumber(t *testing.T) { failUrls := []string{"https://fake.git/foo/bar/pulls"} for _, u := range failUrls { _, err := cmd.PullRequestURLToNumber(u) assert.Errorf(t, err, "Expected error for pullRequestURLToNumber() with %s", u) } tests := map[string]int{ "https://fake.git/foo/bar/pulls/12": 12, } for u, expected := range tests { actual, err := cmd.PullRequestURLToNumber(u) assert.NoError(t, err, "pullRequestURLToNumber() should not fail for %s", u) if err == nil { assert.Equal(t, expected, actual, "pullRequestURLToNumber() for %s", u) } } }
{ testOrgName := "jstrachan" testRepoName := "manual" stagingRepoName := "environment-staging" prodRepoName := "environment-production" fakeRepo := gits.NewFakeRepository(testOrgName, testRepoName) stagingRepo := gits.NewFakeRepository(testOrgName, stagingRepoName) prodRepo := gits.NewFakeRepository(testOrgName, prodRepoName) fakeGitProvider := gits.NewFakeProvider(fakeRepo, stagingRepo, prodRepo) fakeGitProvider.User.Username = testOrgName staging := kube.NewPermanentEnvironmentWithGit("staging", "https://fake.git/"+testOrgName+"/"+stagingRepoName+".git") production := kube.NewPermanentEnvironmentWithGit("production", "https://fake.git/"+testOrgName+"/"+prodRepoName+".git") production.Spec.PromotionStrategy = v1.PromotionStrategyTypeManual configureGitFn := func(dir string, gitInfo *gits.GitRepository, gitter gits.Gitter) error { err := gitter.Init(dir) if err != nil { return err } // Really we should have a dummy environment chart but for now let's just mock it out as needed err = os.MkdirAll(filepath.Join(dir, "templates"), 0700) if err != nil { return err } data, err := json.Marshal(staging) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(dir, "templates", "environment-staging.yaml"), data, 0755) if err != nil { return err } data, err = json.Marshal(production) if err != nil { return err } err = ioutil.WriteFile(filepath.Join(dir, "templates", "environment-production.yaml"), data, 0755) if err != nil { return err } return gitter.AddCommit(dir, "Initial Commit") } o := &cmd.ControllerWorkflowOptions{ ControllerOptions: cmd.ControllerOptions{ CommonOptions: &opts.CommonOptions{}, }, NoWatch: true, Namespace: "jx", ConfigureGitFn: configureGitFn, } workflowName := "default" cmd.ConfigureTestOptionsWithResources(o.CommonOptions, []runtime.Object{}, []runtime.Object{ staging, production, kube.NewPreviewEnvironment("jx-jstrachan-demo96-pr-1"), kube.NewPreviewEnvironment("jx-jstrachan-another-pr-3"), }, gits.NewGitLocal(), fakeGitProvider, helm.NewHelmCLI("helm", helm.V2, "", true), resources_test.NewMockInstaller(), ) err := cmd.CreateTestEnvironmentDir(o.CommonOptions) assert.NoError(t, err) jxClient, ns, err := o.JXClientAndDevNamespace() assert.NoError(t, err) a, err := cmd.CreateTestPipelineActivity(jxClient, ns, testOrgName, testRepoName, "master", "1", workflowName) assert.NoError(t, err) if err != nil { return } err = o.Run() assert.NoError(t, err) if err != nil { return } activities := jxClient.JenkinsV1().PipelineActivities(ns) cmd.AssertHasPullRequestForEnv(t, activities, a.Name, "staging") cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeRunning) // lets make sure we don't create a PR for production as its manual cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, "production") if !cmd.AssertSetPullRequestMerged(t, fakeGitProvider, stagingRepo.Owner, stagingRepo.GitRepo.Name, 1) { return } if !cmd.AssertSetPullRequestComplete(t, fakeGitProvider, stagingRepo, 1) { return } cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeSucceeded) cmd.AssertHasNoPullRequestForEnv(t, activities, a.Name, "production") cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertAllPromoteStepsSuccessful(t, activities, a.Name) // now lets do a manual promotion version := a.Spec.Version po := &cmd.PromoteOptions{ Application: testRepoName, Environment: "production", Pipeline: 
a.Spec.Pipeline, Build: a.Spec.Build, Version: version, NoPoll: true, IgnoreLocalFiles: true, HelmRepositoryURL: helm.DefaultHelmRepositoryURL, LocalHelmRepoName: kube.LocalHelmRepoName, Namespace: "jx", ConfigureGitCallback: configureGitFn, } po.CommonOptions = o.CommonOptions po.BatchMode = true log.Infof("Promoting to production version %s for app %s\n", version, testRepoName) err = po.Run() assert.NoError(t, err) if err != nil { return } cmd.AssertHasPullRequestForEnv(t, activities, a.Name, "production") cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeRunning) cmd.AssertHasPipelineStatus(t, activities, a.Name, v1.ActivityStatusTypeRunning) cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, "production", v1.ActivityStatusTypeRunning) cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, "production", v1.ActivityStatusTypeRunning) cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeSucceeded) cmd.AssertHasPipelineStatus(t, activities, a.Name, v1.ActivityStatusTypeSucceeded) cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, "production", v1.ActivityStatusTypeRunning) cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeSucceeded) cmd.AssertHasPipelineStatus(t, activities, a.Name, v1.ActivityStatusTypeSucceeded) if !cmd.AssertSetPullRequestMerged(t, fakeGitProvider, prodRepo.Owner, prodRepo.GitRepo.Name, 1) { return } cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, "production", v1.ActivityStatusTypeRunning) cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeSucceeded) cmd.AssertHasPipelineStatus(t, activities, a.Name, v1.ActivityStatusTypeSucceeded) cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, "production", v1.ActivityStatusTypeRunning) cmd.AssertWorkflowStatus(t, activities, a.Name, v1.ActivityStatusTypeSucceeded) cmd.AssertHasPipelineStatus(t, activities, a.Name, v1.ActivityStatusTypeSucceeded) if !cmd.AssertSetPullRequestComplete(t, fakeGitProvider, prodRepo, 1) { return } cmd.PollGitStatusAndReactToPipelineChanges(t, o, jxClient, ns) cmd.AssertHasPromoteStatus(t, activities, a.Name, "staging", v1.ActivityStatusTypeSucceeded) cmd.AssertHasPromoteStatus(t, activities, a.Name, "production", v1.ActivityStatusTypeSucceeded) cmd.AssertAllPromoteStepsSuccessful(t, activities, a.Name) }
connection.rs
use crate::error::Result; use bitvec::prelude::{BitArray, BitVec, Lsb0}; use bitvec::{bitarr, bitvec}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpStream; pub(crate) struct Connection { // status is a bitfield // 0b000x = self choked // 0b00x0 = self interested // 0b0x00 = peer choked // 0bx000 = peer interested status: BitArray<Lsb0, [u8; 1]>, bitfield: BitVec, conn: TcpStream, } impl Connection { const MAX_PIECE_LENGTH: u32 = 1 << 14; // 16 KiB // bitfield markers for `[Connection::status]` const SELF_CHOKED: u8 = 1 << 0; const SELF_INTERESTED: u8 = 1 << 1; const PEER_CHOKED: u8 = 1 << 2; const PEER_INTERESTED: u8 = 1 << 3; pub(crate) async fn handshake( mut conn: TcpStream, info_hash: &[u8], peer_id: &[u8], total_pieces: usize, ) -> Option<Connection> { // Handshake layout: // length | value // -------+------------------- // 1 | 19 (\x13) => 1 // 19 | Bittorrent Protocol // 8 | extn flags \x00 * 8 // 20 | sha-1 // 20 | peer_id // -- | total // 68 let (mut rx, mut tx) = conn.split(); let b: BitArray<Lsb0, [u8; 1]> = bitarr![Lsb0, u8; 0, 1, 0, 1]; // write our end of the handshake let send = async { let prefix = b"\x13Bittorrent Protocol\x00\x00\x00\x00\x00\x00\x00\x00"; tx.write_all(prefix).await?; tx.write_all(info_hash).await?; tx.write_all(peer_id).await?; Ok(()) }; // read a bittorrent greeting let recv = async { let err = Err(std::io::Error::from(std::io::ErrorKind::Other)); let mut buffer = vec![0; 20]; // protocol prefix rx.read_exact(&mut buffer).await?; if &buffer[..19] != b"\x13Bittorrent Protocol" { return err; } // extension flags rx.read_exact(&mut buffer[..8]).await?; if !&buffer[..8].iter().all(|b| *b == 0) { return err; } // info_hash rx.read_exact(&mut buffer).await?; if buffer != info_hash { return err; } // peer id buffer.fill(0); rx.read_exact(&mut buffer[..]).await?; String::from_utf8(buffer).or(err) }; let (_, peer_id) = futures::try_join!(send, recv).ok()?; let t = bitvec![0; total_pieces]; let t2: &[u8] = t.as_raw_slice(). 
Some(Connection { status: bitarr![const Lsb0, u8; 0, 1, 0, 1], bitfield: bitvec![0; total_pieces], conn, }) } pub(crate) async fn decode_frame(&mut self) -> Result<Message> { // message format: <length: u32> <message type: u8> <payload?: Vec<u8>> let length = self.conn.read_u32().await.unwrap(); if length == 0 { return Ok(Message::KeepAlive); } Ok(match self.conn.read_u8().await.unwrap() { 0 if length == 1 => Message::Choke, 1 if length == 1 => Message::Unchoke, 2 if length == 1 => Message::Interested, 3 if length == 1 => Message::NotInterested, 4 if length == 5 => Message::Have(self.conn.read_u32().await.unwrap()), 5 if length == (1 + self.bitfield.len()) as u32 => Message::Bitfield(vec![]), // todo - verify bitfield length 6 if length == 13 => Message::Request { index: self.conn.read_u32().await.unwrap(), begin: self.conn.read_u32().await.unwrap(), length: self.conn.read_u32().await.unwrap(), }, 7 if length >= 9 && length - 9 < Self::MAX_PIECE_LENGTH => Message::Piece { index: self.conn.read_u32().await.unwrap(), begin: self.conn.read_u32().await.unwrap(), block: vec![], }, 8 if length == 13 => Message::Cancel { index: self.conn.read_u32().await.unwrap(), begin: self.conn.read_u32().await.unwrap(), length: self.conn.read_u32().await.unwrap(), }, 9 if length == 3 => Message::Port(self.conn.read_u16().await.unwrap()), _ => return Err(crate::error::Error::NoTrackerAvailable), // todo - remove }) } } pub(crate) enum Message { KeepAlive, // | len = 0 Choke, // id = 0 | len = 1 Unchoke, // id = 1 | len = 1 Interested, // id = 2 | len = 1 NotInterested, // id = 3 | len = 1
    Have(/* piece index */ u32), // id = 4 | len = 5
    Bitfield(/* bitfield */ Vec<u8>), // id = 5 | len = 1+x
    // id = 6 | len = 13
Request { index: u32, begin: u32, length: u32, }, // id = 7 | len = 9+x Piece { index: u32, begin: u32, block: Vec<u8>, }, // id = 8 | len = 13 Cancel { index: u32, begin: u32, length: u32, }, Port(/* listen port */ u16), // id = 9 | len = 3 } #[cfg(test)] mod test { use bitvec::prelude::*; use std::mem::{size_of, size_of_val}; #[test] fn arr_size() { let b: BitArray<Lsb0, [u32; 2]> = bitarr![Lsb0, u32; 0; 55]; let b2: BitArray = bitarr![0; 4]; // let b2: usize = 0; println!("{}", size_of_val(&b)); println!("{}", size_of_val(&b2)); println!("{}", size_of::<usize>()); } }
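
// Illustrative sketch (not used by the implementation): the raw byte layout
// that `Connection::decode_frame` parses, shown for a Have message. The byte
// values are made up for demonstration; the id/length pairs follow the
// comments on the `Message` variants above, and `read_u32` reads big-endian.
//
//   length (u32, big-endian) | id (u8) | payload
//   -------------------------+---------+------------------------
//   00 00 00 05              | 04      | 00 00 00 2a  (piece #42)
//
// A zero-length frame (`00 00 00 00`) is decoded as `Message::KeepAlive`.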
instances_get.go
package main import ( "fmt" "net/http" "sort" "strconv" "strings" "sync" "time" "github.com/gorilla/mux" "github.com/pkg/errors" "github.com/lxc/lxd/lxd/cluster" "github.com/lxc/lxd/lxd/db" "github.com/lxc/lxd/lxd/db/query" "github.com/lxc/lxd/lxd/filter" "github.com/lxc/lxd/lxd/instance" "github.com/lxc/lxd/lxd/instance/instancetype" "github.com/lxc/lxd/lxd/rbac" "github.com/lxc/lxd/lxd/response" "github.com/lxc/lxd/shared" "github.com/lxc/lxd/shared/api" "github.com/lxc/lxd/shared/logger" "github.com/lxc/lxd/shared/version" ) // urlInstanceTypeDetect detects what sort of instance type filter is being requested. Either // explicitly via the instance-type query param or implicitly via the endpoint URL used. func urlInstanceTypeDetect(r *http.Request) (instancetype.Type, error) { reqInstanceType := r.URL.Query().Get("instance-type") if strings.HasPrefix(mux.CurrentRoute(r).GetName(), "container") { return instancetype.Container, nil } else if strings.HasPrefix(mux.CurrentRoute(r).GetName(), "vm") { return instancetype.VM, nil } else if reqInstanceType != "" { instanceType, err := instancetype.New(reqInstanceType) if err != nil { return instancetype.Any, err } return instanceType, nil } return instancetype.Any, nil } // swagger:operation GET /1.0/instances instances instances_get // // Get the instances // // Returns a list of instances (URLs). // // --- // produces: // - application/json // parameters: // - in: query // name: project // description: Project name // type: string // example: default // - in: query // name: filter // description: Collection filter // type: string // example: default // - in: query // name: all-projects // description: Retrieve instances from all projects // type: boolean // responses: // "200": // description: API endpoints // schema: // type: object // description: Sync response // properties: // type: // type: string // description: Response type // example: sync // status: // type: string // description: Status description // example: Success // status_code: // type: integer // description: Status code // example: 200 // metadata: // type: array // description: List of endpoints // items: // type: string // example: |- // [ // "/1.0/instances/foo", // "/1.0/instances/bar" // ] // "403": // $ref: "#/responses/Forbidden" // "500": // $ref: "#/responses/InternalServerError" // swagger:operation GET /1.0/instances?recursion=1 instances instances_get_recursion1 // // Get the instances // // Returns a list of instances (basic structs). 
// // --- // produces: // - application/json // parameters: // - in: query // name: project // description: Project name // type: string // example: default // - in: query // name: filter // description: Collection filter // type: string // example: default // - in: query // name: all-projects // description: Retrieve instances from all projects // type: boolean // responses: // "200": // description: API endpoints // schema: // type: object // description: Sync response // properties: // type: // type: string // description: Response type // example: sync // status: // type: string // description: Status description // example: Success // status_code: // type: integer // description: Status code // example: 200 // metadata: // type: array // description: List of instances // items: // $ref: "#/definitions/Instance" // "403": // $ref: "#/responses/Forbidden" // "500": // $ref: "#/responses/InternalServerError" // swagger:operation GET /1.0/instances?recursion=2 instances instances_get_recursion2 // // Get the instances // // Returns a list of instances (full structs). // // The main difference between recursion=1 and recursion=2 is that the // latter also includes state and snapshot information allowing for a // single API call to return everything needed by most clients. // // --- // produces: // - application/json // parameters: // - in: query // name: project // description: Project name // type: string // example: default // - in: query // name: filter // description: Collection filter // type: string // example: default // - in: query // name: all-projects // description: Retrieve instances from all projects // type: boolean // responses: // "200": // description: API endpoints // schema: // type: object // description: Sync response // properties: // type: // type: string // description: Response type // example: sync // status: // type: string // description: Status description // example: Success // status_code: // type: integer // description: Status code // example: 200 // metadata: // type: array // description: List of instances // items: // $ref: "#/definitions/InstanceFull" // "403": // $ref: "#/responses/Forbidden" // "500": // $ref: "#/responses/InternalServerError" func instancesGet(d *Daemon, r *http.Request) response.Response { for i := 0; i < 100; i++ { result, err := doInstancesGet(d, r) if err == nil { return response.SyncResponse(true, result) } if !query.IsRetriableError(err) { logger.Debugf("DBERR: containersGet: error %q", err) return response.SmartError(err) } // 100 ms may seem drastic, but we really don't want to thrash // perhaps we should use a random amount time.Sleep(100 * time.Millisecond) } logger.Debugf("DBERR: containersGet, db is locked") logger.Debugf(logger.GetStack()) return response.InternalError(fmt.Errorf("DB is locked")) } func doInstancesGet(d *Daemon, r *http.Request) (interface{}, error) { resultString := []string{} resultList := []*api.Instance{} resultFullList := []*api.InstanceFull{} resultMu := sync.Mutex{} instanceType, err := urlInstanceTypeDetect(r) if err != nil { return nil, err } // Parse the recursion field recursionStr := r.FormValue("recursion") recursion, err := strconv.Atoi(recursionStr) if err != nil { recursion = 0 } // Parse filter value filterStr := r.FormValue("filter") var clauses []filter.Clause if filterStr != "" { clauses, err = filter.Parse(filterStr) if err != nil { return nil, errors.Wrap(err, "Invalid filter") } } // Parse the project field projectName := projectParam(r) // Parse all-projects field allProjects := 
r.FormValue("all-projects") // Get the list and location of all containers var result map[string][]string // Containers by node address var nodes map[string]string // Node names by container filteredProjects := []string{} err = d.cluster.Transaction(func(tx *db.ClusterTx) error { var err error if allProjects == "true" { projects, err := tx.GetProjects(db.ProjectFilter{}) if err != nil { return err } for _, project := range projects { if !rbac.UserHasPermission(r, project.Name, "view") { continue } filteredProjects = append(filteredProjects, project.Name) } } else { filteredProjects = []string{projectName} } result, err = tx.GetInstanceNamesByNodeAddress(filteredProjects, db.InstanceTypeFilter(instanceType)) if err != nil { return err } nodes, err = tx.GetInstanceToNodeMap(filteredProjects, db.InstanceTypeFilter(instanceType)) if err != nil { return err } return nil }) if err != nil { return []string{}, err } // Get the local instances nodeInstances := map[string]instance.Instance{} mustLoadObjects := recursion > 0 || (recursion == 0 && clauses != nil) if mustLoadObjects { for _, project := range filteredProjects { insts, err := instanceLoadNodeProjectAll(d.State(), project, instanceType) if err != nil { return nil, err } for _, inst := range insts { nodeInstances[inst.Name()] = inst } } } // Append containers to list and handle errors resultListAppend := func(name string, c api.Instance, err error) { if err != nil { c = api.Instance{ Name: name, Status: api.Error.String(), StatusCode: api.Error, Location: nodes[name], } } resultMu.Lock() resultList = append(resultList, &c) resultMu.Unlock() } resultFullListAppend := func(name string, c api.InstanceFull, err error) { if err != nil { c = api.InstanceFull{Instance: api.Instance{ Name: name, Status: api.Error.String(), StatusCode: api.Error, Location: nodes[name], }} } resultMu.Lock() resultFullList = append(resultFullList, &c) resultMu.Unlock() } // Get the data wg := sync.WaitGroup{} networkCert := d.endpoints.NetworkCert() for address, instanceNames := range result { // If this is an internal request from another cluster node, // ignore containers from other nodes, and return only the ones // on this node if isClusterNotification(r) && address != "" { continue } // Mark containers on unavailable nodes as down if mustLoadObjects && address == "0.0.0.0" { for _, instanceName := range instanceNames { if recursion < 2 { resultListAppend(instanceName, api.Instance{}, fmt.Errorf("unavailable")) } else { resultFullListAppend(instanceName, api.InstanceFull{}, fmt.Errorf("unavailable")) } } continue } // For recursion requests we need to fetch the state of remote // containers from their respective nodes. 
if mustLoadObjects && address != "" && !isClusterNotification(r) { wg.Add(1) go func(address string, containers []string) { defer wg.Done() if recursion == 1 { cs, err := doContainersGetFromNode(filteredProjects, address, allProjects, networkCert, d.serverCert(), r, instanceType) if err != nil { for _, name := range containers { resultListAppend(name, api.Instance{}, err) } return } for _, c := range cs { resultListAppend(c.Name, c, nil) } return } cs, err := doContainersFullGetFromNode(filteredProjects, address, allProjects, networkCert, d.serverCert(), r, instanceType) if err != nil { for _, name := range containers { resultFullListAppend(name, api.InstanceFull{}, err) } return } for _, c := range cs { resultFullListAppend(c.Name, c, nil) } }(address, instanceNames) continue } if !mustLoadObjects { for _, instanceName := range instanceNames { instancePath := "instances" if strings.HasPrefix(mux.CurrentRoute(r).GetName(), "container") { instancePath = "containers" } else if strings.HasPrefix(mux.CurrentRoute(r).GetName(), "vm") { instancePath = "virtual-machines" } url := fmt.Sprintf("/%s/%s/%s", version.APIVersion, instancePath, instanceName) resultString = append(resultString, url) } } else { threads := 4 if len(instanceNames) < threads { threads = len(instanceNames) } queue := make(chan string, threads) for i := 0; i < threads; i++ { wg.Add(1) go func() { for { instanceName, more := <-queue if !more { break } inst, found := nodeInstances[instanceName] if !found { continue } if recursion < 2 { c, _, err := inst.Render() if err != nil { resultListAppend(instanceName, api.Instance{}, err) } else { resultListAppend(instanceName, *c.(*api.Instance), err) } continue } c, _, err := inst.RenderFull() if err != nil { resultFullListAppend(instanceName, api.InstanceFull{}, err) } else { resultFullListAppend(instanceName, *c, err) } } wg.Done() }() } for _, instanceName := range instanceNames { queue <- instanceName } close(queue) } } wg.Wait() if recursion == 0 { if clauses != nil { for _, container := range instance.Filter(resultList, clauses) { instancePath := "instances" if strings.HasPrefix(mux.CurrentRoute(r).GetName(), "container") { instancePath = "containers" } else if strings.HasPrefix(mux.CurrentRoute(r).GetName(), "vm") { instancePath = "virtual-machines" } url := fmt.Sprintf("/%s/%s/%s", version.APIVersion, instancePath, container.Name) resultString = append(resultString, url) } } return resultString, nil } if recursion == 1 { // Sort the result list by name. sort.Slice(resultList, func(i, j int) bool { return resultList[i].Name < resultList[j].Name }) if clauses != nil { resultList = instance.Filter(resultList, clauses) } return resultList, nil } // Sort the result list by name. sort.Slice(resultFullList, func(i, j int) bool { return resultFullList[i].Name < resultFullList[j].Name }) if clauses != nil { resultFullList = instance.FilterFull(resultFullList, clauses) } return resultFullList, nil } // Fetch information about the containers on the given remote node, using the // rest API and with a timeout of 30 seconds. 
func doContainersGetFromNode(projects []string, node, allProjects string, networkCert *shared.CertInfo, serverCert *shared.CertInfo, r *http.Request, instanceType instancetype.Type) ([]api.Instance, error) { f := func() ([]api.Instance, error) { client, err := cluster.Connect(node, networkCert, serverCert, r, true) if err != nil { return nil, errors.Wrapf(err, "Failed to connect to node %s", node) } var containers []api.Instance if allProjects == "true" { containers, err = client.GetInstancesAllProjects(api.InstanceType(instanceType.String())) if err != nil { return nil, errors.Wrapf(err, "Failed to get instances from node %s", node) } } else { for _, project := range projects { client = client.UseProject(project) tmpContainers, err := client.GetInstances(api.InstanceType(instanceType.String())) if err != nil { return nil, errors.Wrapf(err, "Failed to get instances from node %s", node) } containers = append(containers, tmpContainers...) } } return containers, nil } timeout := time.After(30 * time.Second) done := make(chan struct{}) var containers []api.Instance var err error go func() { containers, err = f() done <- struct{}{} }() select { case <-timeout: err = fmt.Errorf("Timeout getting instances from node %s", node) case <-done: } return containers, err } func doContainersFullGetFromNode(projects []string, node, allProjects string, networkCert *shared.CertInfo, serverCert *shared.CertInfo, r *http.Request, instanceType instancetype.Type) ([]api.InstanceFull, error) { f := func() ([]api.InstanceFull, error) { client, err := cluster.Connect(node, networkCert, serverCert, r, true)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed to connect to node %s", node)
		}
var instances []api.InstanceFull if allProjects == "true" { instances, err = client.GetInstancesFullAllProjects(api.InstanceType(instanceType.String())) if err != nil { return nil, errors.Wrapf(err, "Failed to get instances from node %s", node) } } else { for _, project := range projects { client = client.UseProject(project) tmpInstances, err := client.GetInstancesFull(api.InstanceType(instanceType.String())) if err != nil { return nil, errors.Wrapf(err, "Failed to get instances from node %s", node) } instances = append(instances, tmpInstances...) } } return instances, nil } timeout := time.After(30 * time.Second) done := make(chan struct{}) var instances []api.InstanceFull var err error go func() { instances, err = f() done <- struct{}{} }() select { case <-timeout: err = fmt.Errorf("Timeout getting instances from node %s", node) case <-done: } return instances, err }
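
// Illustrative request (a sketch, not generated by this file): the query
// parameters parsed by doInstancesGet and urlInstanceTypeDetect above. The
// filter expression shown is an assumption about the collection filter syntax
// accepted by filter.Parse, not an exhaustive reference.
//
//	GET /1.0/instances?project=default&recursion=2&all-projects=false&instance-type=container&filter=status+eq+Running
//
// recursion=0 returns URLs only, recursion=1 returns api.Instance structs and
// recursion=2 additionally includes state and snapshot information.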
dsio.go
// Package dsio defines writers & readers for operating on "container" data structures (objects and arrays) package dsio import ( "fmt" "io" logger "github.com/ipfs/go-log" "github.com/qri-io/dataset" ) var log = logger.Logger("dsio") // EntryWriter is a generalized interface for writing structured data type EntryWriter interface { // Structure gives the structure being written Structure() *dataset.Structure // WriteEntry writes one "row" of structured data to the Writer WriteEntry(Entry) error // Close finalizes the writer, indicating all entries // have been written Close() error } // EntryReader is a generalized interface for reading Ordered Structured Data type EntryReader interface { // Structure gives the structure being read Structure() *dataset.Structure // ReadVal reads one row of structured data from the reader ReadEntry() (Entry, error) // Close finalizes the Reader Close() error } // EntryReadWriter combines EntryWriter and EntryReader behaviors type EntryReadWriter interface { // Structure gives the structure being read and written Structure() *dataset.Structure // ReadVal reads one row of structured data from the reader ReadEntry() (Entry, error) // WriteEntry writes one row of structured data to the ReadWriter WriteEntry(Entry) error // Close finalizes the ReadWriter, indicating all entries // have been written Close() error // Bytes gives the raw contents of the ReadWriter Bytes() []byte } // NewEntryReader allocates a EntryReader based on a given structure func NewEntryReader(st *dataset.Structure, r io.Reader) (EntryReader, error) { switch st.DataFormat() { case dataset.CBORDataFormat: return NewCBORReader(st, r) case dataset.JSONDataFormat: return NewJSONReader(st, r) case dataset.CSVDataFormat: return NewCSVReader(st, r), nil case dataset.XLSXDataFormat: return NewXLSXReader(st, r) case dataset.UnknownDataFormat: err := fmt.Errorf("structure must have a data format") log.Debug(err.Error()) return nil, err default: err := fmt.Errorf("invalid format to create reader: %s", st.Format) log.Debug(err.Error()) return nil, err } } // NewEntryWriter allocates a EntryWriter based on a given structure func NewEntryWriter(st *dataset.Structure, w io.Writer) (EntryWriter, error) { switch st.DataFormat() { case dataset.CBORDataFormat: return NewCBORWriter(st, w) case dataset.JSONDataFormat: return NewJSONWriter(st, w) case dataset.CSVDataFormat: return NewCSVWriter(st, w), nil case dataset.XLSXDataFormat: return NewXLSXWriter(st, w) case dataset.UnknownDataFormat: err := fmt.Errorf("structure must have a data format") log.Debug(err.Error()) return nil, err default: err := fmt.Errorf("invalid format to create writer: %s", st.Format) log.Debug(err.Error())
		return nil, err
} } // GetTopLevelType returns the top-level type of the structure, only if it is // a valid type ("array" or "object"), otherwise returns an error func GetTopLevelType(st *dataset.Structure) (string, error) { // tlt := st.Schema.TopLevelType() if st.Schema == nil { return "", fmt.Errorf("a schema object is required") } tlt, ok := st.Schema["type"].(string) if !ok { return "", fmt.Errorf("schema top level 'type' value must be either 'array' or 'object'") } if tlt != "array" && tlt != "object" { return "", fmt.Errorf("invalid schema. root must be either an array or object type") } return tlt, nil }
return nil, err
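NewEntryReader and NewEntryWriter above pick a concrete implementation from the structure's data format and return an error for a missing or unknown format. A rough sketch of that dispatch shape in Go, using made-up local types (RowReader, csvRowReader, jsonRowReader) rather than the qri-io/dataset API:

package main

import (
	"encoding/csv"
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// RowReader is a stand-in for the EntryReader idea: read one record at a time.
type RowReader interface {
	Read() ([]string, error)
}

type csvRowReader struct{ r *csv.Reader }

func (c csvRowReader) Read() ([]string, error) { return c.r.Read() }

type jsonRowReader struct{ dec *json.Decoder }

func (j jsonRowReader) Read() ([]string, error) {
	var row []string
	if err := j.dec.Decode(&row); err != nil {
		return nil, err
	}
	return row, nil
}

// NewRowReader dispatches on a format string, mirroring the switch in dsio.
func NewRowReader(format string, r io.Reader) (RowReader, error) {
	switch format {
	case "csv":
		return csvRowReader{csv.NewReader(r)}, nil
	case "json":
		return jsonRowReader{json.NewDecoder(r)}, nil
	case "":
		return nil, fmt.Errorf("a data format is required")
	default:
		return nil, fmt.Errorf("invalid format to create reader: %s", format)
	}
}

func main() {
	rr, err := NewRowReader("csv", strings.NewReader("a,b\nc,d\n"))
	if err != nil {
		fmt.Println(err)
		return
	}
	for {
		row, err := rr.Read()
		if err != nil {
			break
		}
		fmt.Println(row)
	}
}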
server_push_test.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build go1.8 package http2 import ( "errors" "fmt" "io" "io/ioutil" "net/http" "reflect" "strconv" "sync" "testing" "time" ) func TestServer_Push_Success(t *testing.T) { const ( mainBody = "<html>index page</html>" pushedBody = "<html>pushed page</html>" userAgent = "testagent" cookie = "testcookie" ) var stURL string checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error { if got, want := r.Method, wantMethod; got != want { return fmt.Errorf("promised Req.Method=%q, want %q", got, want) } if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) { return fmt.Errorf("promised Req.Header=%q, want %q", got, want) } if got, want := "https://"+r.Host, stURL; got != want { return fmt.Errorf("promised Req.Host=%q, want %q", got, want) } if r.Body == nil { return fmt.Errorf("nil Body") } if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 { return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err) } return nil } errc := make(chan error, 3) st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { switch r.URL.RequestURI() { case "/": // Push "/pushed?get" as a GET request, using an absolute URL. opt := &http.PushOptions{ Header: http.Header{ "User-Agent": {userAgent}, }, } if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil { errc <- fmt.Errorf("error pushing /pushed?get: %v", err) return } // Push "/pushed?head" as a HEAD request, using a path. opt = &http.PushOptions{ Method: "HEAD", Header: http.Header{ "User-Agent": {userAgent}, "Cookie": {cookie}, }, } if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil { errc <- fmt.Errorf("error pushing /pushed?head: %v", err) return } w.Header().Set("Content-Type", "text/html") w.Header().Set("Content-Length", strconv.Itoa(len(mainBody))) w.WriteHeader(200) io.WriteString(w, mainBody) errc <- nil case "/pushed?get": wantH := http.Header{} wantH.Set("User-Agent", userAgent) if err := checkPromisedReq(r, "GET", wantH); err != nil { errc <- fmt.Errorf("/pushed?get: %v", err) return } w.Header().Set("Content-Type", "text/html") w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody))) w.WriteHeader(200) io.WriteString(w, pushedBody) errc <- nil case "/pushed?head": wantH := http.Header{} wantH.Set("User-Agent", userAgent) wantH.Set("Cookie", cookie) if err := checkPromisedReq(r, "HEAD", wantH); err != nil { errc <- fmt.Errorf("/pushed?head: %v", err) return } w.WriteHeader(204) errc <- nil default: errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) } }) stURL = st.ts.URL // Send one request, which should push two responses. 
st.greet() getSlash(st) for k := 0; k < 3; k++ { select { case <-time.After(2 * time.Second): t.Errorf("timeout waiting for handler %d to finish", k) case err := <-errc: if err != nil { t.Fatal(err) } } } checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error { pp, ok := f.(*PushPromiseFrame) if !ok { return fmt.Errorf("got a %T; want *PushPromiseFrame", f) } if !pp.HeadersEnded() { return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame") } if got, want := pp.PromiseID, promiseID; got != want { return fmt.Errorf("got PromiseID %v; want %v", got, want) } gotH := st.decodeHeader(pp.HeaderBlockFragment()) if !reflect.DeepEqual(gotH, wantH) { return fmt.Errorf("got promised headers %v; want %v", gotH, wantH) } return nil } checkHeaders := func(f Frame, wantH [][2]string) error { hf, ok := f.(*HeadersFrame) if !ok { return fmt.Errorf("got a %T; want *HeadersFrame", f) } gotH := st.decodeHeader(hf.HeaderBlockFragment()) if !reflect.DeepEqual(gotH, wantH) { return fmt.Errorf("got response headers %v; want %v", gotH, wantH) } return nil } checkData := func(f Frame, wantData string) error { df, ok := f.(*DataFrame) if !ok { return fmt.Errorf("got a %T; want *DataFrame", f) } if gotData := string(df.Data()); gotData != wantData { return fmt.Errorf("got response data %q; want %q", gotData, wantData) } return nil } // Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA // Stream 2 has HEADERS + DATA // Stream 4 has HEADERS expected := map[uint32][]func(Frame) error{ 1: { func(f Frame) error { return checkPushPromise(f, 2, [][2]string{ {":method", "GET"}, {":scheme", "https"}, {":authority", st.ts.Listener.Addr().String()}, {":path", "/pushed?get"}, {"user-agent", userAgent}, }) }, func(f Frame) error { return checkPushPromise(f, 4, [][2]string{ {":method", "HEAD"}, {":scheme", "https"}, {":authority", st.ts.Listener.Addr().String()}, {":path", "/pushed?head"}, {"cookie", cookie}, {"user-agent", userAgent}, }) }, func(f Frame) error { return checkHeaders(f, [][2]string{ {":status", "200"}, {"content-type", "text/html"}, {"content-length", strconv.Itoa(len(mainBody))}, }) }, func(f Frame) error { return checkData(f, mainBody) }, }, 2: { func(f Frame) error { return checkHeaders(f, [][2]string{ {":status", "200"}, {"content-type", "text/html"}, {"content-length", strconv.Itoa(len(pushedBody))}, }) }, func(f Frame) error { return checkData(f, pushedBody) }, }, 4: { func(f Frame) error { return checkHeaders(f, [][2]string{ {":status", "204"}, }) }, }, } consumed := map[uint32]int{} for k := 0; len(expected) > 0; k++ { f, err := st.readFrame() if err != nil { for id, left := range expected { t.Errorf("stream %d: missing %d frames", id, len(left)) } t.Fatalf("readFrame %d: %v", k, err) } id := f.Header().StreamID label := fmt.Sprintf("stream %d, frame %d", id, consumed[id]) if len(expected[id]) == 0 { t.Fatalf("%s: unexpected frame %#+v", label, f) } check := expected[id][0] expected[id] = expected[id][1:] if len(expected[id]) == 0 { delete(expected, id) } if err := check(f); err != nil { t.Fatalf("%s: %v", label, err) } consumed[id]++ } } func TestServer_Push_SuccessNoRace(t *testing.T) { // Regression test for issue #18326. Ensure the request handler can mutate // pushed request headers without racing with the PUSH_PROMISE write. 
errc := make(chan error, 2) st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { switch r.URL.RequestURI() { case "/": opt := &http.PushOptions{ Header: http.Header{"User-Agent": {"testagent"}}, } if err := w.(http.Pusher).Push("/pushed", opt); err != nil { errc <- fmt.Errorf("error pushing: %v", err) return } w.WriteHeader(200) errc <- nil case "/pushed": // Update request header, ensure there is no race. r.Header.Set("User-Agent", "newagent") r.Header.Set("Cookie", "cookie") w.WriteHeader(200) errc <- nil default: errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) } }) // Send one request, which should push one response. st.greet() getSlash(st) for k := 0; k < 2; k++ { select { case <-time.After(2 * time.Second): t.Errorf("timeout waiting for handler %d to finish", k) case err := <-errc: if err != nil { t.Fatal(err) } } } } func TestServer_Push_RejectRecursivePush(t *testing.T) { // Expect two requests, but might get three if there's a bug and the second push succeeds. errc := make(chan error, 3) handler := func(w http.ResponseWriter, r *http.Request) error { baseURL := "https://" + r.Host switch r.URL.Path { case "/": if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil { return fmt.Errorf("first Push()=%v, want nil", err) } return nil case "/push1": if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want { return fmt.Errorf("Push()=%v, want %v", got, want) } return nil default: return fmt.Errorf("unexpected path: %q", r.URL.Path) } } st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { errc <- handler(w, r) }) defer st.Close() st.greet() getSlash(st) if err := <-errc; err != nil { t.Errorf("First request failed: %v", err) } if err := <-errc; err != nil { t.Errorf("Second request failed: %v", err) } } func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) { // Expect one request, but might get two if there's a bug and the push succeeds. errc := make(chan error, 2) st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { errc <- doPush(w.(http.Pusher), r) }) defer st.Close() st.greet() if err := st.fr.WriteSettings(settings...); err != nil { st.t.Fatalf("WriteSettings: %v", err) } st.wantSettingsAck() getSlash(st) if err := <-errc; err != nil { t.Error(err) } // Should not get a PUSH_PROMISE frame. hf := st.wantHeaders() if !hf.StreamEnded() { t.Error("stream should end after headers") } } func TestServer_Push_RejectIfDisabled(t *testing.T) { testServer_Push_RejectSingleRequest(t, func(p http.Pusher, r *http.Request) error { if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { return fmt.Errorf("Push()=%v, want %v", got, want) } return nil }, Setting{SettingEnablePush, 0}) } func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) { testServer_Push_RejectSingleRequest(t, func(p http.Pusher, r *http.Request) error { if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want { return fmt.Errorf("Push()=%v, want %v", got, want) } return nil }, Setting{SettingMaxConcurrentStreams, 0}) } func TestServer_Push_RejectWrongScheme(t *testing.T)
func TestServer_Push_RejectMissingHost(t *testing.T) { testServer_Push_RejectSingleRequest(t, func(p http.Pusher, r *http.Request) error { if err := p.Push("https:pushed", nil); err == nil { return errors.New("Push() should have failed (push target URL missing host)") } return nil }) } func TestServer_Push_RejectRelativePath(t *testing.T) { testServer_Push_RejectSingleRequest(t, func(p http.Pusher, r *http.Request) error { if err := p.Push("../test", nil); err == nil { return errors.New("Push() should have failed (push target is a relative path)") } return nil }) } func TestServer_Push_RejectForbiddenMethod(t *testing.T) { testServer_Push_RejectSingleRequest(t, func(p http.Pusher, r *http.Request) error { if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil { return errors.New("Push() should have failed (cannot promise a POST)") } return nil }) } func TestServer_Push_RejectForbiddenHeader(t *testing.T) { testServer_Push_RejectSingleRequest(t, func(p http.Pusher, r *http.Request) error { header := http.Header{ "Content-Length": {"10"}, "Content-Encoding": {"gzip"}, "Trailer": {"Foo"}, "Te": {"trailers"}, "Host": {"test.com"}, ":authority": {"test.com"}, } if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil { return errors.New("Push() should have failed (forbidden headers)") } return nil }) } func TestServer_Push_StateTransitions(t *testing.T) { const body = "foo" gotPromise := make(chan bool) finishedPush := make(chan bool) st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { switch r.URL.RequestURI() { case "/": if err := w.(http.Pusher).Push("/pushed", nil); err != nil { t.Errorf("Push error: %v", err) } // Don't finish this request until the push finishes so we don't // nondeterministically interleave output frames with the push. <-finishedPush case "/pushed": <-gotPromise } w.Header().Set("Content-Type", "text/html") w.Header().Set("Content-Length", strconv.Itoa(len(body))) w.WriteHeader(200) io.WriteString(w, body) }) defer st.Close() st.greet() if st.stream(2) != nil { t.Fatal("stream 2 should be empty") } if got, want := st.streamState(2), stateIdle; got != want { t.Fatalf("streamState(2)=%v, want %v", got, want) } getSlash(st) // After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote. st.wantPushPromise() if got, want := st.streamState(2), stateHalfClosedRemote; got != want { t.Fatalf("streamState(2)=%v, want %v", got, want) } // We stall the HTTP handler for "/pushed" until the above check. If we don't // stall the handler, then the handler might write HEADERS and DATA and finish // the stream before we check st.streamState(2) -- should that happen, we'll // see stateClosed and fail the above check. 
close(gotPromise) st.wantHeaders() if df := st.wantData(); !df.StreamEnded() { t.Fatal("expected END_STREAM flag on DATA") } if got, want := st.streamState(2), stateClosed; got != want { t.Fatalf("streamState(2)=%v, want %v", got, want) } close(finishedPush) } func TestServer_Push_RejectAfterGoAway(t *testing.T) { var readyOnce sync.Once ready := make(chan struct{}) errc := make(chan error, 2) st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { select { case <-ready: case <-time.After(5 * time.Second): errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed") } if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { errc <- fmt.Errorf("Push()=%v, want %v", got, want) } errc <- nil }) defer st.Close() st.greet() getSlash(st) // Send GOAWAY and wait for it to be processed. st.fr.WriteGoAway(1, ErrCodeNo, nil) go func() { for { select { case <-ready: return default: } st.sc.testHookCh <- func(loopNum int) { if !st.sc.pushEnabled { readyOnce.Do(func() { close(ready) }) } } } }() if err := <-errc; err != nil { t.Error(err) } }
{ testServer_Push_RejectSingleRequest(t, func(p http.Pusher, r *http.Request) error { if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil { return errors.New("Push() should have failed (push target URL is http)") } return nil }) }
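The tests above pin down when a server-initiated push is allowed: it fails for recursive pushes, push disabled by the client, an exhausted concurrent-stream limit, a non-https target, a missing host or relative path, forbidden methods or headers, and pushes after GOAWAY. An illustrative sketch of the caller side of that API in a plain net/http handler; the /app.js path and the cert.pem/key.pem file names are placeholders:

package main

import (
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Push is only available on HTTP/2 connections, so check the interface first.
	if pusher, ok := w.(http.Pusher); ok {
		opts := &http.PushOptions{
			Header: http.Header{"Accept-Encoding": r.Header["Accept-Encoding"]},
		}
		// Push can still fail at runtime, for example with http.ErrNotSupported
		// when the client disabled push, or when the push limit is reached.
		if err := pusher.Push("/app.js", opts); err != nil {
			log.Printf("push failed: %v", err)
		}
	}
	w.Write([]byte("<html><script src=\"/app.js\"></script></html>"))
}

func main() {
	http.HandleFunc("/", handler)
	// HTTP/2 (and therefore push) requires TLS with the standard library server.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}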
normalizeTuple_test.js
/*globals Foo:true, $foo:true */ import { normalizeTuple } from "ember-metal/property_get"; var obj, moduleOpts = { setup: function() { obj = { foo: { bar: { baz: {} } } }; window.Foo = { bar: { baz: {} } }; window.$foo = { bar: { baz: {} } }; }, teardown: function() { obj = undefined; window.Foo = undefined; window.$foo = undefined; } }; QUnit.module('normalizeTuple', moduleOpts); // .......................................................... // LOCAL PATHS // test('[obj, foo] -> [obj, foo]', function() { deepEqual(normalizeTuple(obj, 'foo'), [obj, 'foo']); }); test('[obj, *] -> [obj, *]', function() { deepEqual(normalizeTuple(obj, '*'), [obj, '*']); }); test('[obj, foo.bar] -> [obj, foo.bar]', function() { deepEqual(normalizeTuple(obj, 'foo.bar'), [obj, 'foo.bar']); }); test('[obj, foo.*] -> [obj, foo.*]', function() { deepEqual(normalizeTuple(obj, 'foo.*'), [obj, 'foo.*']); }); test('[obj, foo.*.baz] -> [obj, foo.*.baz]', function() {
test('[obj, this.foo] -> [obj, foo]', function() { deepEqual(normalizeTuple(obj, 'this.foo'), [obj, 'foo']); }); test('[obj, this.foo.bar] -> [obj, foo.bar]', function() { deepEqual(normalizeTuple(obj, 'this.foo.bar'), [obj, 'foo.bar']); }); test('[obj, this.Foo.bar] -> [obj, Foo.bar]', function() { deepEqual(normalizeTuple(obj, 'this.Foo.bar'), [obj, 'Foo.bar']); }); // .......................................................... // GLOBAL PATHS // test('[obj, Foo] -> [obj, Foo]', function() { deepEqual(normalizeTuple(obj, 'Foo'), [obj, 'Foo']); }); test('[obj, Foo.bar] -> [Foo, bar]', function() { deepEqual(normalizeTuple(obj, 'Foo.bar'), [Foo, 'bar']); }); test('[obj, $foo.bar.baz] -> [$foo, bar.baz]', function() { deepEqual(normalizeTuple(obj, '$foo.bar.baz'), [$foo, 'bar.baz']); }); // .......................................................... // NO TARGET // test('[null, Foo] -> EXCEPTION', function() { raises(function() { normalizeTuple(null, 'Foo'); }, Error); }); test('[null, Foo.bar] -> [Foo, bar]', function() { deepEqual(normalizeTuple(null, 'Foo.bar'), [Foo, 'bar']); });
deepEqual(normalizeTuple(obj, 'foo.*.baz'), [obj, 'foo.*.baz']); });
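The assertions above describe normalizeTuple's contract: local paths keep the original target, a leading 'this.' is stripped and pins the path to that target, and a path that starts with a global name (capitalized or '$'-prefixed) is rebased onto that global. A loose Go sketch of the same splitting rule, using a map as a stand-in for the global scope:

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// normalizeTuple mimics the tested behavior: return (target, path), rebasing
// onto a global when the first segment names one and "this." was not used.
func normalizeTuple(target any, path string, globals map[string]any) (any, string) {
	hadThis := strings.HasPrefix(path, "this.")
	path = strings.TrimPrefix(path, "this.")

	first, rest, found := strings.Cut(path, ".")
	isGlobalName := len(first) > 0 &&
		(unicode.IsUpper(rune(first[0])) || first[0] == '$')

	// "this." pins the path to the local target even if it looks global.
	if !hadThis && isGlobalName && found {
		if g, ok := globals[first]; ok {
			return g, rest
		}
	}
	return target, path
}

func main() {
	obj := map[string]any{}
	globals := map[string]any{"Foo": "the Foo global", "$foo": "the $foo global"}

	fmt.Println(normalizeTuple(obj, "foo.bar", globals))      // obj, "foo.bar"
	fmt.Println(normalizeTuple(obj, "this.Foo.bar", globals)) // obj, "Foo.bar"
	fmt.Println(normalizeTuple(obj, "Foo.bar", globals))      // Foo global, "bar"
}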
validator.rs
// Copyright Materialize, Inc. and contributors. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. use std::collections::{BTreeMap, HashMap}; use std::fmt; use std::time::Instant; use tracing::info; use differential_dataflow::lattice::Lattice; use timely::progress::{Antichain, Timestamp}; use crate::error::Error; use crate::nemesis::validator::uptime::Uptime; use crate::nemesis::{ AllowCompactionReq, ReadOutputEvent, ReadOutputReq, ReadOutputRes, ReadSnapshotReq, ReadSnapshotRes, Res, SealReq, SnapshotId, Step, StepMeta, TakeSnapshotReq, WriteReq, WriteReqMulti, WriteReqSingle, }; use crate::storage::SeqNo; #[derive(Debug)] pub struct Validator { seal_frontier: HashMap<String, u64>, since_frontier: HashMap<String, u64>, writes_by_seqno: BTreeMap<(String, SeqNo), Vec<((String, ()), u64, i64)>>, output_by_stream: HashMap<String, Vec<ReadOutputEvent<(Result<(String, ()), String>, u64, i64)>>>, available_snapshots: HashMap<SnapshotId, (String, Instant)>, errors: Vec<String>, uptime: Uptime, } // TODO: Unit tests for Validator. This is already fairly complicated and it's // only going to get more complicated as we add more checks for API invariants. impl Validator { pub fn validate(mut history: Vec<Step>) -> Result<(), Vec<String>> { // Uptime requires that its input be sorted by before. history.sort_by_key(|s| s.meta.before); let uptime = Uptime::new(&history); let mut v = Validator::new(uptime); for step in history.into_iter() { v.step(step); } v.finish() } fn new(uptime: Uptime) -> Self { Validator { seal_frontier: HashMap::new(), since_frontier: HashMap::new(), writes_by_seqno: BTreeMap::new(), output_by_stream: HashMap::new(), available_snapshots: HashMap::new(), errors: Vec::new(), uptime, } } fn finish(self) -> Result<(), Vec<String>> { if self.errors.is_empty() { Ok(()) } else { Err(self.errors) } } fn step(&mut self, s: Step) { info!("step: {:?}", &s); match s.res { Res::Write(WriteReq::Single(req), res) => self.step_write_single(&s.meta, req, res), Res::Write(WriteReq::Multi(req), res) => self.step_write_multi(&s.meta, req, res), Res::ReadOutput(req, res) => self.step_read_output(&s.meta, req, res), Res::Seal(req, res) => self.step_seal(&s.meta, req, res), Res::AllowCompaction(req, res) => self.step_allow_compaction(&s.meta, req, res), Res::TakeSnapshot(req, res) => self.step_take_snapshot(&s.meta, req, res), Res::ReadSnapshot(req, res) => self.step_read_snapshot(&s.meta, req, res), Res::Start(res) => self.step_start(&s.meta, res), Res::Stop(res) => self.step_stop(&s.meta, res), Res::StorageUnavailable | Res::StorageAvailable => {} } } /// Enqueues an error if the given result was an error but we know /// externally that it should have succeeded. /// /// This used to also require failure when it was known externally that it /// shouldn't have been able to work (e.g. a write when storage is /// unavailable). That worked when nemesis was totally deterministic, but /// was already becoming a maintenance burden. As we move toward pipelining /// requests, running them concurrently in threads, and adding periods of /// non-determinism to the unreliable storage, it will require trinary logic /// (yes/no/maybe), which gets surprisingly complicated. 
/// /// Stepping back, nemesis's purpose is to verify that we adhere to our /// external API guarantees. This means assertions on histories of /// request/response pairs and outputs of dataflow operators, not anything /// around what to expect given detailed history of the availability of /// external systems. /// /// However, we do keep this one "liveness" check around so we don't /// accidentally pass validation by simply erroring every request (which is /// a valid, but not useful history). fn check_success<T: fmt::Debug>( &mut self, meta: &StepMeta, res: &Result<T, Error>, require_succeed: bool, ) { match (require_succeed, res) { (true, Err(err)) => self.errors.push(format!( "expected success but got error {:?}: {}", meta.req_id, err )), _ => {} } } fn check_failure<T: fmt::Debug>( &mut self, meta: &StepMeta, res: &Result<T, Error>, require_error: bool, ) { match (require_error, res) { (true, Ok(res)) => self.errors.push(format!( "expected error but got success {:?}: {:?}", meta.req_id, res )), _ => {} } } fn step_write_single( &mut self, meta: &StepMeta, req: WriteReqSingle, res: Result<SeqNo, Error>, ) { let req_ok = req.update.1 >= self .seal_frontier .get(&req.stream) .copied() .unwrap_or_default(); let require_succeed = self.uptime.storage_available(meta.before, meta.after) && self.uptime.runtime_available(meta.before, meta.after) && req_ok; self.check_success(meta, &res, require_succeed); self.check_failure(meta, &res, !req_ok); if let Ok(res) = res { self.writes_by_seqno .entry((req.stream, res)) .or_default() .push(req.update); } } fn step_write_multi(&mut self, meta: &StepMeta, req: WriteReqMulti, res: Result<SeqNo, Error>) { let req_ok = req.writes.len() > 0 && req.writes.iter().all(|req| { req.update.1 >= self .seal_frontier .get(&req.stream) .copied() .unwrap_or_default() }); let require_succeed = self.uptime.storage_available(meta.before, meta.after) && self.uptime.runtime_available(meta.before, meta.after) && req_ok; self.check_success(meta, &res, require_succeed); self.check_failure(meta, &res, !req_ok); if let Ok(res) = res { for req in req.writes { self.writes_by_seqno .entry((req.stream, res)) .or_default() .push(req.update); } } } fn step_read_output( &mut self, meta: &StepMeta, req: ReadOutputReq, res: Result<ReadOutputRes, Error>, ) { let require_succeed = true; self.check_success(meta, &res, require_succeed); if let Ok(res) = res { let all_stream_output = self.output_by_stream.entry(req.stream.clone()).or_default(); all_stream_output.extend(res.contents); // Seal acts as a barrier, so we're guaranteed to receive any writes // that were sent for times before the seal. However, we're not // guaranteed to see every seal we sent, especially across restarts. // Start by finding the latest seal. let mut latest_seal = Timestamp::minimum(); let mut all_received_writes = Vec::new(); let mut all_received_errors = Vec::new(); for e in all_stream_output.iter() { match e { ReadOutputEvent::Sealed(ts) => { if *ts > latest_seal { latest_seal = *ts; } } ReadOutputEvent::Records(records) => { for r in records.iter() { match r { (Ok((k, v)), ts, diff) => { all_received_writes.push(((k.clone(), *v), *ts, *diff)) } (Err(err), _, _) => all_received_errors.push(err.clone()), } } } } } // If we've gotten any errors out of the dataflow, all bets are off. if !all_received_errors.is_empty() { return; } // The latest seal shouldn't be past anything we sent. 
let latest_seal_sent = self .seal_frontier .get(&req.stream) .copied() .unwrap_or_default(); if latest_seal > latest_seal_sent { self.errors.push(format!( "received seal {} greater than the latest one we sent {}", latest_seal, latest_seal_sent )); } // To compare two sets of records, we first need to ensure they have // the same since. // // In the writes_by_seqno map that Validator keeps internally, the // original full-fidelity records are kept. This corresponds to a // since of 0 (more specifically, the empty antichain). // // Because the records output by the PersistedSource operator // includes replaying a snapshot, it may have a since >0. Once we // fix #8608, we'll know exactly what it is (because #8608 is all // about specifying the since/as_of at construction time) but until // then we don't really know what it is. // // So what we do for now is forward both sets of records to a since // that's guaranteed to be at in advance of both of them: // specifically the the largest thing we've allowed_compaction to. let as_of = Antichain::from_elem( self.since_frontier .get(&req.stream) .copied() .unwrap_or_default(), ); if !as_of.less_than(&latest_seal) { // TODO: We cannot currently verify cases where the compaction frontier is beyond // the since, because we cannot determine anymore (based on the latest seal // timestamp) which records (both from the expected writes and the writes we get // from timely) are eligible for verification. return; } // Verify that the output contains all sent writes less than the // latest seal it contains. // // TODO: Figure out what our contract is for writes we've received // in advance of this latest seal. There should be something we can // do here. let mut actual = all_received_writes .into_iter() .filter(|(_, ts, _)| *ts < latest_seal) .map(|(kv, mut ts, diff)| { // TODO: For the same reason we only advance the "expected" // side of updates_eq (the Validator's writes_by_seqno), // once we've fixed #8608, we should only advance the // `expected` side of updates_eq to whatever we set as the // as_of when constructing the PersistedSource operator. // Correct adherence to the since/as_of is part of the // interface of Snapshot and #8608 is all about making it // part of the interface of PersistedSource as well. 
ts.advance_by(as_of.borrow()); (kv, ts, diff) }) .collect(); let mut expected: Vec<((String, ()), u64, i64)> = self .writes_by_seqno .range((req.stream.clone(), SeqNo(0))..(req.stream, SeqNo(u64::MAX))) .flat_map(|(_, v)| v) .filter(|(_, ts, _)| *ts < latest_seal) .cloned() .collect(); if !updates_eq(&mut actual, &mut expected, as_of) { self.errors.push(format!( "incorrect output {:?} up to {}, expected {:?} got: {:?}", meta.req_id, latest_seal, expected, actual )); } } } fn step_seal(&mut self, meta: &StepMeta, req: SealReq, res: Result<SeqNo, Error>) { let req_ok = req.ts >= self .seal_frontier .get(&req.stream) .copied() .unwrap_or_default(); let require_succeed = self.uptime.storage_available(meta.before, meta.after) && self.uptime.runtime_available(meta.before, meta.after) && req_ok; self.check_success(meta, &res, require_succeed); self.check_failure(meta, &res, !req_ok); if let Ok(_) = res { self.seal_frontier.insert(req.stream, req.ts); } } fn step_allow_compaction( &mut self, meta: &StepMeta, req: AllowCompactionReq, res: Result<SeqNo, Error>, ) { let req_ok = req.ts >= self .since_frontier .get(&req.stream) .copied() .unwrap_or_default(); let require_succeed = self.uptime.storage_available(meta.before, meta.after) && self.uptime.runtime_available(meta.before, meta.after) && req_ok; self.check_success(meta, &res, require_succeed); self.check_failure(meta, &res, !req_ok); if let Ok(_) = res { self.since_frontier.insert(req.stream, req.ts); } } fn step_take_snapshot( &mut self, meta: &StepMeta, req: TakeSnapshotReq, res: Result<SeqNo, Error>, ) { let require_succeed = self.uptime.storage_available(meta.before, meta.after) && self.uptime.runtime_available(meta.before, meta.after); self.check_success(meta, &res, require_succeed); if let Ok(_) = res { self.available_snapshots .insert(req.snap, (req.stream, meta.before)); } } fn step_read_snapshot( &mut self, meta: &StepMeta, req: ReadSnapshotReq, res: Result<ReadSnapshotRes, Error>, ) { match self.available_snapshots.remove(&req.snap) { None => { self.check_success(meta, &res, false); } Some((stream, before_snap_start)) => { let require_succeed = self.uptime.storage_available(before_snap_start, meta.after) && self.uptime.runtime_available(before_snap_start, meta.after); self.check_success(meta, &res, require_succeed); if let Ok(res) = res { let mut actual = res.contents; let mut expected: Vec<((String, ()), u64, i64)> = self .writes_by_seqno .range((stream.clone(), SeqNo(0))..=(stream, SeqNo(res.seqno))) .flat_map(|(_, v)| v) .cloned() .collect(); if !updates_eq(&mut actual, &mut expected, res.since) { self.errors.push(format!( "incorrect snapshot {:?} expected {:?} got: {:?}", meta.req_id, expected, actual )); } } } } } fn step_start(&mut self, meta: &StepMeta, res: Result<(), Error>) { // The semantics of Req::Start are pretty blunt. It unconditionally // attempts to start a new persister. If the storage is down, the new // one won't be able to read metadata and will fail to start. This will // cause all operations on the persister to fail until it gets another // Req::Start with storage available. This ends up being a pretty // uninteresting state to test, so we filter out everything but start // and storage_available when the runtime is not available. 
let require_succeed = self.uptime.storage_available(meta.before, meta.after); self.check_success(meta, &res, require_succeed); } fn step_stop(&mut self, meta: &StepMeta, res: Result<(), Error>) { // Stop will succeed if it can cleanly release locks, which // requires the storage to be available. let require_succeed = self.uptime.storage_available(meta.before, meta.after); self.check_success(meta, &res, require_succeed); self.output_by_stream.clear(); } } fn updates_eq( actual: &mut Vec<((String, ()), u64, i64)>, expected: &mut Vec<((String, ()), u64, i64)>, since: Antichain<u64>, ) -> bool { // TODO: This is also used by the implementation. Write a slower but more // obvious impl of consolidation here and use it for validation. // The snapshot has been logically compacted to since, so update our // expected to match.
} differential_dataflow::consolidation::consolidate_updates(actual); differential_dataflow::consolidation::consolidate_updates(expected); actual == expected } mod uptime { use std::cmp; use std::collections::BTreeMap; use std::time::Instant; use crate::nemesis::{Res, Step}; /// A helper for Validate that tracks which times storage/runtime were /// unambiguously up. #[derive(Debug)] pub struct Uptime { storage_downtime: IntervalTree<Instant>, runtime_downtime: IntervalTree<Instant>, } impl Uptime { /// Returns a new [Uptime] from the given steps. /// /// Steps must be sorted by before. pub fn new(steps: &[Step]) -> Self { let (storage_downtime, runtime_downtime) = match steps.iter().max_by_key(|s| s.meta.after) { Some(max_after) => ( Uptime::storage_downtime(steps, max_after.meta.after), Uptime::runtime_downtime(steps, max_after.meta.after), ), None => (IntervalTree::default(), IntervalTree::default()), }; Uptime { storage_downtime, runtime_downtime, } } /// Returns true if storage was unambiguously available for the entire given /// range. /// /// Before and after are both inclusive. pub fn storage_available(&self, before: Instant, after: Instant) -> bool { !self.storage_downtime.overlaps(before, after) } /// Returns true if the runtime was unambiguously available for the entire /// given range. /// /// Before and after are both inclusive. pub fn runtime_available(&self, before: Instant, after: Instant) -> bool { !self.runtime_downtime.overlaps(before, after) } // NB: Steps must be sorted by before. fn storage_downtime(steps: &[Step], after_all_steps: Instant) -> IntervalTree<Instant> { let mut downtime_before = None; let mut storage_downtime = IntervalTree::default(); for step in steps { match step.res { Res::StorageAvailable => match downtime_before.take() { Some(downtime_before) => { storage_downtime.push(downtime_before, step.meta.after) } None => {} }, Res::StorageUnavailable => { if downtime_before.is_none() { downtime_before = Some(step.meta.before) } } _ => {} } } // If downtime_before is still a Some, that means the test ended in // downtime. if let Some(downtime_before) = downtime_before.take() { storage_downtime.push(downtime_before, after_all_steps); } storage_downtime } // NB: Steps must be sorted by before. fn runtime_downtime(steps: &[Step], after_all_steps: Instant) -> IntervalTree<Instant> { let mut downtime_before = None; let mut runtime_downtime = IntervalTree::default(); for step in steps { match step.res { // We tried to start the runtime and it succeeded. Res::Start(Ok(_)) => match downtime_before.take() { Some(downtime_before) => { runtime_downtime.push(downtime_before, step.meta.after) } None => {} }, // We either tried to start the runtime and it failed (most // likely the storage was down) or we stopped the runtime. Res::Start(Err(_)) | Res::Stop(_) => { if downtime_before.is_none() { downtime_before = Some(step.meta.before) } } _ => {} } } // If downtime_before is still a Some, that means the test ended in // downtime. if let Some(downtime_before) = downtime_before.take() { runtime_downtime.push(downtime_before, after_all_steps); } runtime_downtime } } /// Just enough of an interval tree implementation for Validator's needs. /// /// If this gets any more complicated, we should probably switch to an interval /// tree library. Simplifying requirements of this impl: /// - Construction happens sorted by start. /// - All added intervals are merged. 
#[derive(Debug)] pub struct IntervalTree<T> { intervals: BTreeMap<T, T>, } impl<T: Ord> Default for IntervalTree<T> { fn default() -> Self { Self { intervals: BTreeMap::new(), } } } impl<T: Ord + Copy> IntervalTree<T> { /// Add a new interval to this tree. /// /// Start must be >= the start of any previous interval added. Both start /// and end are inclusive. pub fn push(&mut self, start: T, end: T) { assert!(end >= start); assert!(self .intervals .iter() .last() .map_or(true, |(s, _)| *s <= start)); let overlapping = self.intervals.iter().last().and_then(|(s, e)| { if *e >= start { Some((*s, *e)) } else { None } }); match overlapping { Some((overlapping_s, overlapping_e)) => self .intervals .insert(overlapping_s, cmp::max(overlapping_e, end)), None => self.intervals.insert(start, end), }; } /// Returns whether the given interval overlaps any previously added /// interval. /// /// Both start and end are inclusive. pub fn overlaps(&self, begin: T, end: T) -> bool { let overlaps_lt_begin = self .intervals .range(..begin) .last() .map_or(false, |(_, e)| *e >= begin); let overlaps_ge_begin = self.intervals.range(begin..=end).next().is_some(); overlaps_lt_begin || overlaps_ge_begin } } #[cfg(test)] mod tests { use std::time::{Duration, Instant}; use crate::error::Error; use crate::nemesis::{ReqId, StepMeta}; use super::*; #[test] fn uptime() { let beginning_of_time = Instant::now(); let ts = |offset_micros| beginning_of_time + Duration::from_micros(offset_micros); let storage_up = |before, after| Step { meta: StepMeta { req_id: ReqId(0), before: ts(before), after: ts(after), }, res: Res::StorageAvailable, }; let storage_down = |before, after| Step { meta: StepMeta { req_id: ReqId(0), before: ts(before), after: ts(after), }, res: Res::StorageUnavailable, }; let runtime_up = |before, after, success| Step { meta: StepMeta { req_id: ReqId(0), before: ts(before), after: ts(after), }, res: Res::Start(if success { Ok(()) } else { Err(Error::from("failed")) }), }; let runtime_down = |before, after| Step { meta: StepMeta { req_id: ReqId(0), before: ts(before), after: ts(after), }, res: Res::Stop(Ok(())), }; // Empty. Both storage and runtime start as available. let u = Uptime::new(&[]); assert_eq!(u.storage_available(ts(0), ts(1)), true); assert_eq!(u.runtime_available(ts(0), ts(1)), true); // Check storage. let u = Uptime::new(&[ // Storage starts up, so this is a no-op. storage_up(1, 2), // The [4,5] interval here are a bounds on some instant when it // became unavailable. It stays down until the next up. storage_down(4, 5), // Storage became re-available somewhere in this interval, but // we don't know for sure that it's back until _after_ this // entire interval. Since the end is closed, that meant ts(9) is // the first time it's guaranteed back up.l storage_up(7, 8), // End with storage down to catch an edge case. storage_down(11, 12), ]); // Storage starts up so the [1, 2] storage_up is a no-op. assert_eq!(u.storage_available(ts(0), ts(3)), true); // All intervals are inclusive on both ends, so anything touching // [4,8] is down. 
assert_eq!(u.storage_available(ts(2), ts(3)), true); assert_eq!(u.storage_available(ts(3), ts(3)), true); assert_eq!(u.storage_available(ts(3), ts(4)), false); assert_eq!(u.storage_available(ts(4), ts(4)), false); assert_eq!(u.storage_available(ts(4), ts(8)), false); assert_eq!(u.storage_available(ts(5), ts(7)), false); assert_eq!(u.storage_available(ts(8), ts(8)), false); assert_eq!(u.storage_available(ts(9), ts(9)), true); assert_eq!(u.storage_available(ts(9), ts(10)), true); // Runtime has its own wrinkles (unlike making storage available, // starting the runtime can fail), so exercise those. We could also // duplicate the storage tests above for runtime too, but the code // pathways are the same so don't bother. let u = Uptime::new(&[ runtime_down(1, 2), // Runtime fails to start, so is still down runtime_up(4, 5, false), // Runtime actually starts. runtime_up(7, 8, true), ]); assert_eq!(u.runtime_available(ts(0), ts(0)), true); assert_eq!(u.runtime_available(ts(1), ts(4)), false); assert_eq!(u.runtime_available(ts(6), ts(6)), false); assert_eq!(u.runtime_available(ts(9), ts(9)), true); // Check that runtime and storage are independent. (They're not in // reality, runtime can't come up with storage down, but we let the // step results tell us that instead of inferring it.) let u = Uptime::new(&[ runtime_down(1, 2), runtime_up(4, 5, true), storage_down(7, 8), storage_up(10, 11), ]); assert_eq!(u.storage_available(ts(1), ts(5)), true); assert_eq!(u.runtime_available(ts(1), ts(5)), false); assert_eq!(u.storage_available(ts(7), ts(11)), false); assert_eq!(u.runtime_available(ts(7), ts(11)), true); } #[test] fn interval_tree() { let mut i = IntervalTree::<usize>::default(); // Empty tree. assert_eq!(i.overlaps(0, 1), false); // Add some data and check overlaps. i.push(2, 3); i.push(10, 13); assert_eq!(i.overlaps(0, 1), false); assert_eq!(i.overlaps(0, 2), true); assert_eq!(i.overlaps(1, 2), true); assert_eq!(i.overlaps(1, 3), true); assert_eq!(i.overlaps(1, 4), true); assert_eq!(i.overlaps(2, 2), true); assert_eq!(i.overlaps(2, 3), true); assert_eq!(i.overlaps(3, 3), true); assert_eq!(i.overlaps(3, 4), true); assert_eq!(i.overlaps(4, 5), false); assert_eq!(i.overlaps(11, 12), true); // Regression test for a bug in the initial impl where the `end` of // the interval being pushed was always kept, even if one in the // tree already had a later end. assert_eq!(i.overlaps(12, 13), true); assert_eq!(i.overlaps(13, 14), true); i.push(11, 12); assert_eq!(i.overlaps(13, 14), true); } } }
for (_, t, _) in expected.iter_mut() { t.advance_by(since.borrow());
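validator.rs carries a deliberately minimal interval structure: intervals arrive sorted by start, touching intervals are merged on push, and overlap queries are answered from the stored ranges. A loose Go sketch of the same idea with a linear-scan overlap check, not a translation of the Rust BTreeMap version:

package main

import "fmt"

// intervalSet stores closed, non-overlapping intervals, relying on the caller
// to push them sorted by start (the same simplification validator.rs makes).
type intervalSet struct {
	starts, ends []int
}

// push adds [start, end], merging with the last interval if they touch.
func (s *intervalSet) push(start, end int) {
	n := len(s.starts)
	if n > 0 && s.ends[n-1] >= start {
		if end > s.ends[n-1] {
			s.ends[n-1] = end
		}
		return
	}
	s.starts = append(s.starts, start)
	s.ends = append(s.ends, end)
}

// overlaps reports whether [begin, end] touches any stored interval.
func (s *intervalSet) overlaps(begin, end int) bool {
	for i := range s.starts {
		if s.starts[i] <= end && s.ends[i] >= begin {
			return true
		}
	}
	return false
}

func main() {
	var s intervalSet
	s.push(2, 3)
	s.push(10, 13)
	fmt.Println(s.overlaps(0, 1))   // false
	fmt.Println(s.overlaps(3, 4))   // true
	fmt.Println(s.overlaps(12, 20)) // true
}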
opcode.rs
#[derive(Debug, PartialEq)] pub enum Opcode { SHR = 0x3E, SHL = 0x3C, ADD = 0x2B, SUB = 0x2D, PUTCHAR = 0x2E, GETCHAR = 0x2C, LB = 0x5B, RB = 0x5D, } impl From<u8> for Opcode { fn from(u: u8) -> Self { match u { 0x3E => Opcode::SHR, 0x3C => Opcode::SHL, 0x2B => Opcode::ADD, 0x2D => Opcode::SUB, 0x2E => Opcode::PUTCHAR, 0x2C => Opcode::GETCHAR, 0x5B => Opcode::LB, 0x5D => Opcode::RB, _ => panic!(), } } } impl Into<u8> for Opcode { fn into(self) -> u8 { match self { Opcode::SHR => 0x3E, Opcode::SHL => 0x3C, Opcode::ADD => 0x2B, Opcode::SUB => 0x2D, Opcode::PUTCHAR => 0x2E, Opcode::GETCHAR => 0x2C, Opcode::LB => 0x5B, Opcode::RB => 0x5D, } } } pub struct Code { pub instrs: Vec<Opcode>, pub jtable: std::collections::HashMap<usize, usize>, } impl Code { pub fn from(data: Vec<u8>) -> Result<Self, Box<dyn std::error::Error>> { let dict: Vec<u8> = vec![ Opcode::SHL.into(), Opcode::SHR.into(), Opcode::ADD.into(), Opcode::SUB.into(), Opcode::GETCHAR.into(), Opcode::PUTCHAR.into(), Opcode::LB.into(), Opcode::RB.into(), ]; let instrs: Vec<Opcode> = data .iter() .filter(|x| dict.contains(x)) .map(|x| Opcode::from(*x)) .collect(); let mut jstack: Vec<usize> = Vec::new(); let mut jtable: std::collections::HashMap<usize, usize> = std::collections::HashMap::new(); for (i, e) in instrs.iter().enumerate() { if Opcode::LB == *e { jstack.push(i); } if Opcode::RB == *e { let j = jstack.pop().ok_or("pop from empty list")?; jtable.insert(j, i); jtable.insert(i, j); } } Ok(Code { instrs, jtable })
} }
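opcode.rs pairs each '[' with its matching ']' by pushing the index of every '[' onto a stack and recording both directions in a jump table when the closing bracket is reached. A small Go sketch of that pass over a Brainfuck source string, plus an extra check for an unmatched '[' that the original does not perform:

package main

import (
	"errors"
	"fmt"
)

// buildJumpTable maps every '[' index to its matching ']' index and vice versa.
func buildJumpTable(src string) (map[int]int, error) {
	jtable := make(map[int]int)
	var stack []int

	for i, c := range src {
		switch c {
		case '[':
			stack = append(stack, i)
		case ']':
			if len(stack) == 0 {
				return nil, errors.New("unmatched ']'")
			}
			j := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			jtable[j] = i
			jtable[i] = j
		}
	}
	if len(stack) != 0 {
		return nil, errors.New("unmatched '['")
	}
	return jtable, nil
}

func main() {
	jt, err := buildJumpTable("++[->+<]")
	fmt.Println(jt, err) // map[2:7 7:2] <nil>
}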
derive-helper-shadowed.rs
// compile-pass
// aux-build:derive-helper-shadowed.rs
// aux-build:derive-helper-shadowed-2.rs

#[macro_use]
extern crate derive_helper_shadowed;
#[macro_use(my_attr)]
extern crate derive_helper_shadowed_2;

macro_rules! my_attr { () => () }

#[derive(MyTrait)]
#[my_attr] // OK
struct S;

fn main() {}
test_channels.py
# Authors: CommPy contributors # License: BSD 3-Clause from __future__ import division, print_function # Python 2 compatibility from math import cos from numpy import ones, inf, sqrt, array, identity, zeros, dot, trace, einsum, absolute, exp, pi, fromiter, kron, \ zeros_like, empty from numpy.random import seed, choice, randn from numpy.testing import run_module_suite, assert_raises, assert_equal, assert_allclose, \ assert_array_equal, dec from commpy.channels import SISOFlatChannel, MIMOFlatChannel from commpy.utilities import signal_power class TestSISOFlatChannel: msg_length = 100000 real_mods = array((-1, 1)), array((-3, 3)) all_mods = array((-1, 1)), array((-3, 3)), \ array((-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j)), array((-3 - 3j, -3 + 3j, 3 - 3j, 3 + 3j)) def test_default_args(self): def check(chan): assert_equal(chan.noises, None, err_msg='Default noises is not None') assert_equal(chan.channel_gains, None, err_msg='Default channel gains is not None') assert_equal(chan.unnoisy_output, None, err_msg='Default unnoisy output is not None') chan = SISOFlatChannel() # Test output state before any propagation check(chan) # Test that noise standard deviation must be set before propagation with assert_raises(AssertionError): chan.propagate(array((1, 1))) # Test output state before any propagation check(chan) assert_equal(chan.nb_rx, 1, err_msg='SISO channel as more than 1 Rx') assert_equal(chan.nb_tx, 1, err_msg='SISO channel as more than 1 Tx') def test_fading(self): # Set seed seed(17121996) def check_chan_gain(mod, chan): msg = choice(mod, self.msg_length) chan.propagate(msg) P_msg = signal_power(msg) P_unnoisy = signal_power(chan.unnoisy_output) assert_allclose(P_unnoisy, P_msg, rtol=0.2, err_msg='Channel add or remove energy') # Test value checking in constructor construction with assert_raises(ValueError): SISOFlatChannel(0, (1, 1)) chan = SISOFlatChannel(0) # Test on real channel for mod in self.real_mods: # Test value checking after construction with assert_raises(ValueError): chan.fading_param = (1, 1) # Test without fading chan.fading_param = (1, 0) check_chan_gain(mod, chan) assert_array_equal(chan.channel_gains, ones(self.msg_length), err_msg='Channel fading while fading is disabled') # Test with Rayleigh fading chan.fading_param = (0, 1) check_chan_gain(mod, chan) assert_allclose(absolute(chan.channel_gains.mean()), 0, atol=2e-2, err_msg='Wrong channel mean with real channel') assert_allclose(chan.channel_gains.var(), 1, atol=0.2, err_msg='Wrong channel variance with real channel') # Test with rician fading chan.fading_param = (sqrt(2 / 3), 1 / 3) check_chan_gain(mod, chan) assert_allclose(chan.channel_gains.mean(), sqrt(2 / 3), atol=2e-2, err_msg='Wrong channel mean with real channel') assert_allclose(chan.channel_gains.var(), 1 / 3, atol=0.2, err_msg='Wrong channel variance with real channel') # Test on complex channel for mod in self.all_mods: # Test value checking after construction with assert_raises(ValueError): chan.fading_param = (1, 1) # Test without fading chan.fading_param = (1 + 0j, 0) check_chan_gain(mod, chan) assert_array_equal(chan.channel_gains, ones(self.msg_length), err_msg='Channel fading while fading is disabled') # Test with Rayleigh fading chan.fading_param = (0j, 1) check_chan_gain(mod, chan) assert_allclose(absolute(chan.channel_gains.mean()), 0, atol=2e-2, err_msg='Wrong channel mean with real channel') assert_allclose(chan.channel_gains.var(), 1, atol=0.2, err_msg='Wrong channel variance with real channel') # Test with rician fading chan.fading_param = (0.5 
+ 0.5j, 0.5) check_chan_gain(mod, chan) assert_allclose(absolute(chan.channel_gains.mean()), sqrt(0.5), atol=2e-2, err_msg='Wrong channel mean with real channel') assert_allclose(chan.channel_gains.var(), 0.5, atol=0.2, err_msg='Wrong channel variance with real channel') def test_noise_generation(self): # Set seed seed(17121996) def check_noise(mod, chan, corrected_SNR_lin): msg = choice(mod, self.msg_length) chan.propagate(msg) P_msg = signal_power(msg) # previous test asserted that channel neither add nor remove energy P_noise = signal_power(chan.noises) assert_allclose(absolute(chan.noises.mean()), 0., atol=5e-2, err_msg='Noise mean is not 0') if corrected_SNR_lin == inf: assert_allclose(P_noise, 0, atol=1e-2, err_msg='There is noise that should not be here') else: assert_allclose(P_msg / P_noise, corrected_SNR_lin, atol=0.2, err_msg='Wrong SNR') chan = SISOFlatChannel(fading_param=(1 + 0j, 0)) for mod in self.all_mods: chan.noise_std = 0 check_noise(mod, chan, inf) chan.set_SNR_lin(6, Es=signal_power(mod)) check_noise(mod, chan, 6) chan.set_SNR_lin(6, .5, signal_power(mod)) check_noise(mod, chan, 3) chan.set_SNR_dB(0, Es=signal_power(mod)) check_noise(mod, chan, 1) chan.set_SNR_dB(0, .5, signal_power(mod)) check_noise(mod, chan, .5) chan = SISOFlatChannel(fading_param=(1, 0)) for mod in self.real_mods: chan.noise_std = 0 check_noise(mod, chan, inf) chan.set_SNR_lin(6, Es=signal_power(mod)) check_noise(mod, chan, 6) chan.set_SNR_lin(6, .5, signal_power(mod)) check_noise(mod, chan, 3) chan.set_SNR_dB(0, Es=signal_power(mod)) check_noise(mod, chan, 1) chan.set_SNR_dB(0, .5, signal_power(mod)) check_noise(mod, chan, .5) def test_type_check(self): chan = SISOFlatChannel(0) with assert_raises(TypeError): chan.propagate(array((1, 1j))) def test_k_factor(self): # Real channel chan = SISOFlatChannel() assert_allclose(chan.k_factor, inf, err_msg='k-factor should be infinite without fading in SISO channels') chan.fading_param = 0, 1 assert_allclose(chan.k_factor, 0, err_msg='k-factor should be 0 with Rayleigh fading in SISO channels') chan.fading_param = sqrt(0.5), 0.5 assert_allclose(chan.k_factor, 1, err_msg='Wrong k-factor with rician fading in SISO channels') # Complex channel chan.fading_param = 1j, 0 assert_allclose(chan.k_factor, inf, err_msg='k-factor should be infinite without fading in SISO channels') chan.fading_param = 0j, 1 assert_allclose(chan.k_factor, 0, err_msg='k-factor should be 0 with Rayleigh fading in SISO channels') chan.fading_param = 0.5 + 0.5j, 0.5 assert_allclose(chan.k_factor, 1, err_msg='Wrong k-factor with rician fading in SISO channels') class MIMOTestCase(object): msg_length = 100000 real_mods = array((-1, 1)), array((-3, 3)) all_mods = array((-1, 1)), array((-3, 3)), \ array((-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j)), array((-3 - 3j, -3 + 3j, 3 - 3j, 3 + 3j)) @staticmethod def random_SDP_matrix(n): G = randn(n, n) dot(G, G.T, G) return G / trace(G) def test_symetric(self): nb_tx = 8 nb_rx = 8 self.do(nb_tx, nb_rx) def test_more_rx(self): nb_tx = 4 nb_rx = 8 self.do(nb_tx, nb_rx) def test_more_tx(self): nb_tx = 8 nb_rx = 4 self.do(nb_tx, nb_rx) def test_SIMO(self): nb_tx = 1 nb_rx = 8 self.do(nb_tx, nb_rx) def test_MISO(self): nb_tx = 8 nb_rx = 1 self.do(nb_tx, nb_rx) def test_SISO(self): nb_tx = 1 nb_rx = 1 self.do(nb_tx, nb_rx) class TestMIMODefaultArgs(MIMOTestCase): def
(self): super(TestMIMODefaultArgs, self).__init__() def do(self, nb_tx, nb_rx): def check(chan): assert_equal(chan.noises, None, err_msg='Default noises is not None') assert_equal(chan.channel_gains, None, err_msg='Default channel gains is not None') assert_equal(chan.unnoisy_output, None, err_msg='Default unnoisy output is not None') chan = MIMOFlatChannel(nb_tx, nb_rx) # Test output state before any propagation check(chan) # Test that noise standard deviation must be set before propagation with assert_raises(AssertionError): chan.propagate(array((1, 1))) # Test output state before any propagation check(chan) @dec.slow class TestMIMOFading(MIMOTestCase): def __init__(self): super(TestMIMOFading, self).__init__() def do(self, nb_tx, nb_rx): # Set seed seed(17121996) def check_chan_gain(mod, chan): msg = choice(mod, self.msg_length) chan.propagate(msg) P_msg = signal_power(msg) P_unnoisy = signal_power(chan.unnoisy_output) assert_allclose(P_unnoisy, P_msg * chan.nb_tx, rtol=0.2, err_msg='Channel add or remove energy') def expo_correlation(t, r): # Construct the exponent matrix expo_tx = fromiter((j - i for i in range(chan.nb_tx) for j in range(chan.nb_tx)), int, chan.nb_tx ** 2) expo_rx = fromiter((j - i for i in range(chan.nb_rx) for j in range(chan.nb_rx)), int, chan.nb_rx ** 2) # Reshape expo_tx = expo_tx.reshape(chan.nb_tx, chan.nb_tx) expo_rx = expo_rx.reshape(chan.nb_rx, chan.nb_rx) return t ** expo_tx, r ** expo_rx def check_correlation(chan, Rt, Rr): nb_ant = chan.nb_tx * chan.nb_rx Rdes = kron(Rt, Rr) H = chan.channel_gains Ract = zeros_like(Rdes) for i in range(len(H)): Ract += H[i].T.reshape(nb_ant, 1).dot(H[i].T.reshape(1, nb_ant).conj()) Ract /= len(H) assert_allclose(Rdes, Ract, atol=0.05, err_msg='Wrong correlation matrix') # Test value checking in constructor construction with assert_raises(ValueError): MIMOFlatChannel(nb_tx, nb_tx, 0, (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx)))) chan = MIMOFlatChannel(nb_tx, nb_rx, 0) prod_nb = nb_tx * nb_rx # Test on real channel for mod in self.real_mods: # Test value checking after construction with assert_raises(ValueError): chan.fading_param = (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx))) # Test with Rayleigh fading chan.fading_param = (zeros((nb_rx, nb_tx)), identity(nb_tx), identity(nb_rx)) check_chan_gain(mod, chan) # Test with rician fading mean = randn(nb_rx, nb_tx) mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean))) Rt = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5 Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5 chan.fading_param = (mean, Rt, Rr) check_chan_gain(mod, chan) # Test helper functions chan.uncorr_rayleigh_fading(float) check_chan_gain(mod, chan) assert_allclose(chan.k_factor, 0, err_msg='Wrong k-factor with uncorrelated Rayleigh fading') mean = randn(nb_rx, nb_tx) chan.uncorr_rician_fading(mean, 10) check_chan_gain(mod, chan) assert_allclose(chan.k_factor, 10, err_msg='Wrong k-factor with uncorrelated rician fading') # Test on complex channel for mod in self.all_mods: # Test value checking after construction with assert_raises(ValueError): chan.fading_param = (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx))) # Test with Rayleigh fading chan.fading_param = (zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx)) check_chan_gain(mod, chan) assert_allclose(chan.channel_gains.mean(), 0, atol=1e-2, err_msg='Wrong channel mean with complex channel') assert_allclose(chan.channel_gains.var(), 1, atol=5e-2, 
err_msg='Wrong channel variance with complex channel') # Test with rician fading mean = randn(nb_rx, nb_tx) + 1j * randn(nb_rx, nb_tx) mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean))) Rt = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5 Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5 chan.fading_param = (mean, Rt, Rr) check_chan_gain(mod, chan) assert_allclose(chan.channel_gains.mean(0).real, mean.real, atol=0.1, err_msg='Wrong channel mean with complex channel') assert_allclose(chan.channel_gains.mean(0).imag, mean.imag, atol=0.1, err_msg='Wrong channel mean with complex channel') # Test helper functions chan.uncorr_rayleigh_fading(complex) check_chan_gain(mod, chan) assert_allclose(chan.k_factor, 0, err_msg='Wrong k-factor with uncorrelated Rayleigh fading') mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j chan.uncorr_rician_fading(mean, 10) check_chan_gain(mod, chan) assert_allclose(chan.k_factor, 10, err_msg='Wrong k-factor with uncorrelated rician fading') chan.expo_corr_rayleigh_fading(exp(-0.2j * pi), exp(-0.1j * pi)) check_chan_gain(mod, chan) assert_allclose(chan.k_factor, 0, err_msg='Wrong k-factor with correlated Rayleigh fading') Rt, Rr = expo_correlation(exp(-0.2j * pi), exp(-0.1j * pi)) check_correlation(chan, Rt, Rr) mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j chan.expo_corr_rician_fading(mean, 10, exp(-0.1j * pi), exp(-0.2j * pi)) check_chan_gain(mod, chan) assert_allclose(chan.k_factor, 10, err_msg='Wrong k-factor with correlated rician fading') # Test with beta > 0 chan.expo_corr_rayleigh_fading(exp(-0.2j * pi), exp(-0.1j * pi), 1, 0.5) check_chan_gain(mod, chan) assert_allclose(chan.k_factor, 0, err_msg='Wrong k-factor with correlated Rayleigh fading') mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j chan.expo_corr_rician_fading(mean, 5, exp(-0.1j * pi), exp(-0.2j * pi), 3, 2) check_chan_gain(mod, chan) assert_allclose(chan.k_factor, 5, err_msg='Wrong k-factor with correlated rician fading') class TestMIMOSpectular(MIMOTestCase): def __init__(self): super(TestMIMOSpectular, self).__init__() def do(self, nb_tx, nb_rx): chan = MIMOFlatChannel(nb_tx, nb_rx, 0) # Test raising of ValueError with assert_raises(ValueError): chan.specular_compo(0, -1, 0, 1) with assert_raises(ValueError): chan.specular_compo(0, 1, 0, -1) # Test the result desired = empty((nb_rx, nb_tx), dtype=complex) for n in range(nb_rx): for m in range(nb_tx): desired[n, m] = exp(1j * 2 * pi * (n * 1 * cos(0.5) - m * 0.1 * cos(2))) assert_allclose(chan.specular_compo(2, 0.1, 0.5, 1), desired, rtol=0.02, err_msg='Wrong specular component') @dec.slow class TestMIMONoiseGeneration(MIMOTestCase): def __init__(self): super(TestMIMONoiseGeneration, self).__init__() def do(self, nb_tx, nb_rx): # Set seed seed(17121996) def check_noise(mod, chan, corrected_SNR_lin): msg = choice(mod, self.msg_length) chan.propagate(msg) P_msg = signal_power(msg) # previous test asserted that channel neither add nor remove energy P_noise = signal_power(chan.noises) assert_allclose(abs(chan.noises.mean()), 0., atol=0.5, err_msg='Noise mean is not 0') if corrected_SNR_lin == inf: assert_allclose(P_noise, 0, atol=1e-2, err_msg='There is noise that should not be here') else: assert_allclose(chan.nb_tx * P_msg / P_noise, corrected_SNR_lin, atol=0.2, err_msg='Wrong SNR') fading_param = zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx) chan = MIMOFlatChannel(nb_tx, nb_rx, fading_param=fading_param) for mod in self.all_mods: chan.noise_std = 0 check_noise(mod, 
chan, inf) chan.set_SNR_lin(6, Es=signal_power(mod)) check_noise(mod, chan, 6) chan.set_SNR_lin(6, .5, signal_power(mod)) check_noise(mod, chan, 3) chan.set_SNR_dB(0, Es=signal_power(mod)) check_noise(mod, chan, 1) chan.set_SNR_dB(0, .5, signal_power(mod)) check_noise(mod, chan, .5) class TestMIMOTypeCheck(MIMOTestCase): def __init__(self): super(TestMIMOTypeCheck, self).__init__() def do(self, nb_tx, nb_rx): chan = MIMOFlatChannel(nb_tx, nb_rx, 0) with assert_raises(TypeError): chan.propagate(array((1, 1j))) class TestMIMOShapes(MIMOTestCase): def __init__(self): super(TestMIMOShapes, self).__init__() def do(self, nb_tx, nb_rx): # Without padding chan = MIMOFlatChannel(nb_tx, nb_rx, 0) out = chan.propagate(ones(nb_tx * 2)) assert_array_equal(chan.channel_gains.shape, (2, nb_rx, nb_tx), err_msg='Wrong channel shape without padding') assert_array_equal(chan.noises.shape, (2, nb_rx), err_msg='Wrong channel shape without padding') assert_array_equal(chan.unnoisy_output.shape, (2, nb_rx), err_msg='Wrong channel shape without padding') assert_array_equal(out.shape, (2, nb_rx), err_msg='Wrong channel shape without padding') # With padding chan = MIMOFlatChannel(nb_tx, nb_rx, 0) out = chan.propagate(ones(nb_tx * 2 + 1)) assert_array_equal(chan.channel_gains.shape, (3, nb_rx, nb_tx), err_msg='Wrong channel shape with padding') assert_array_equal(chan.noises.shape, (3, nb_rx), err_msg='Wrong channel shape with padding') assert_array_equal(chan.unnoisy_output.shape, (3, nb_rx), err_msg='Wrong channel shape with padding') assert_array_equal(out.shape, (3, nb_rx), err_msg='Wrong channel shape with padding') class TestMIMOkFactor(MIMOTestCase): def __init__(self): super(TestMIMOkFactor, self).__init__() def do(self, nb_tx, nb_rx): # Set seed seed(17121996) prod_nb = nb_tx * nb_rx # Real channel chan = MIMOFlatChannel(nb_tx, nb_rx) assert_allclose(chan.k_factor, 0, err_msg='k-factor should be 0 with Rayleigh fading in SISO channels') mean = randn(nb_rx, nb_tx) mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean))) Rs = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5 Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5 chan.fading_param = mean, Rs, Rr assert_allclose(chan.k_factor, 3, err_msg='Wrong k-factor with rician fading in SISO channels') # Complex channel chan.fading_param = (zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx)) assert_allclose(chan.k_factor, 0, err_msg='k-factor should be 0 with Rayleigh fading in SISO channels') mean = randn(nb_rx, nb_tx) + 1j * randn(nb_rx, nb_tx) mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean))) Rs = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5 Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5 chan.fading_param = (mean, Rs, Rr) assert_allclose(chan.k_factor, 3, err_msg='Wrong k-factor with rician fading in SISO channels') if __name__ == "__main__": run_module_suite()
__init__
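The channel tests above repeatedly assert that, for the SISO channel, the measured signal power divided by noise power after set_SNR_dB or set_SNR_lin matches the requested linear SNR times the code rate. A rough AWGN sketch of that relationship, written in Go and independent of CommPy's API; the exact per-dimension scaling CommPy applies to complex noise is not reproduced here:

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// noiseStdFor returns the noise standard deviation needed so that
// Es / noisePower == snrLin * codeRate, the relationship the tests check.
func noiseStdFor(es, snrDB, codeRate float64) float64 {
	snrLin := math.Pow(10, snrDB/10)
	return math.Sqrt(es / (snrLin * codeRate))
}

func main() {
	rng := rand.New(rand.NewSource(1))

	const n = 200000
	es := 1.0                        // BPSK symbols +/-1 have unit energy
	std := noiseStdFor(es, 3.0, 0.5) // 3 dB requested, rate-1/2 code

	var signalPower, noisePower float64
	for i := 0; i < n; i++ {
		s := 1.0
		if rng.Intn(2) == 0 {
			s = -1.0
		}
		noise := std * rng.NormFloat64()
		signalPower += s * s
		noisePower += noise * noise
	}
	fmt.Printf("measured SNR: %.2f (expected ~%.2f)\n",
		signalPower/noisePower, math.Pow(10, 3.0/10)*0.5)
}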