Dataset schema — one row per fill-in-the-middle sample, four string columns:
file_name: string, 3–137 characters
prefix: string, 0–918k characters
suffix: string, 0–962k characters
middle: string, 0–812k characters
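Each row below is a fill-in-the-middle (FIM) sample: the middle span was cut out of a source file, and concatenating prefix + middle + suffix reconstructs the original file. As a minimal sketch of working with rows shaped like this schema (the file name "fim_samples.jsonl" and both helpers are hypothetical, not part of the dataset):

# Minimal sketch: reassembling FIM rows shaped like the schema above.
# The JSONL file name and these helpers are hypothetical.
import json

def load_rows(path):
    # One JSON object per line: {"file_name", "prefix", "suffix", "middle"}
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

def reassemble(row):
    # The original file is the concatenation prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

if __name__ == "__main__":
    for row in load_rows("fim_samples.jsonl"):
        print(row["file_name"], len(reassemble(row)), "characters")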
main.rs
mod pe_viewer; use clap::{Arg, App, AppSettings}; fn main()
{ let app = App::new("PEViewer-Rust") .setting(AppSettings::ArgRequiredElseHelp) .about("Command line tool to analyze PE binary") .version("0.1.0") .author("Lanph3re <[email protected]>") .arg(Arg::with_name("file") .short("f") .long("file") .takes_value(true) .help("PE file to analyze")) .get_matches(); pe_viewer::pe_viewer(app.value_of("file").unwrap()); }
Surface.ts
/** * The MIT License (MIT) * * Copyright (c) 2012-2018 DragonBones team and other contributors * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ namespace dragonBones { /** * @internal */ export class
extends Bone { public static toString(): string { return "[class dragonBones.Surface]"; } private _dX: number; private _dY: number; private _k: number; private _kX: number; private _kY: number; public readonly _vertices: Array<number> = []; public readonly _deformVertices: Array<number> = []; /** * - x1, y1, x2, y2, x3, y3, x4, y4, d1X, d1Y, d2X, d2Y */ private readonly _hullCache: Array<number> = []; /** * - Inside [flag, a, b, c, d, tx, ty], Outside [flag, a, b, c, d, tx, ty] */ private readonly _matrixCahce: Array<number> = []; protected _onClear(): void { super._onClear(); this._dX = 0.0; this._dY = 0.0; this._k = 0.0; this._kX = 0.0; this._kY = 0.0; this._vertices.length = 0; this._deformVertices.length = 0; this._matrixCahce.length = 0; this._hullCache.length = 0; } private _getAffineTransform( x: number, y: number, lX: number, lY: number, aX: number, aY: number, bX: number, bY: number, cX: number, cY: number, transform: Transform, matrix: Matrix, isDown: boolean ): void { const dabX = bX - aX; const dabY = bY - aY; const dacX = cX - aX; const dacY = cY - aY; transform.rotation = Math.atan2(dabY, dabX); transform.skew = Math.atan2(dacY, dacX) - Math.PI * 0.5 - transform.rotation; if (isDown) { transform.rotation += Math.PI; } transform.scaleX = Math.sqrt(dabX * dabX + dabY * dabY) / lX; transform.scaleY = Math.sqrt(dacX * dacX + dacY * dacY) / lY; transform.toMatrix(matrix); transform.x = matrix.tx = aX - (matrix.a * x + matrix.c * y); transform.y = matrix.ty = aY - (matrix.b * x + matrix.d * y); } private _updateVertices(): void { const originalVertices = (this._boneData as SurfaceData).vertices; const vertices = this._vertices; const animationVertices = this._deformVertices; if (this._parent !== null) { if (this._parent._boneData.type === BoneType.Surface) { for (let i = 0, l = originalVertices.length; i < l; i += 2) { const x = originalVertices[i] + animationVertices[i]; const y = originalVertices[i + 1] + animationVertices[i + 1]; const matrix = (this._parent as Surface)._getGlobalTransformMatrix(x, y); // vertices[i] = matrix.a * x + matrix.c * y + matrix.tx; vertices[i + 1] = matrix.b * x + matrix.d * y + matrix.ty; } } else { const parentMatrix = this._parent.globalTransformMatrix; for (let i = 0, l = originalVertices.length; i < l; i += 2) { const x = originalVertices[i] + animationVertices[i]; const y = originalVertices[i + 1] + animationVertices[i + 1]; // vertices[i] = parentMatrix.a * x + parentMatrix.c * y + parentMatrix.tx; vertices[i + 1] = parentMatrix.b * x + parentMatrix.d * y + parentMatrix.ty; } } } else { for (let i = 0, l = originalVertices.length; i < l; i += 2) { vertices[i] = originalVertices[i] + animationVertices[i]; vertices[i + 1] = originalVertices[i + 1] + animationVertices[i + 1]; } } } protected _updateGlobalTransformMatrix(isCache: boolean): void { // tslint:disable-next-line:no-unused-expression isCache; const segmentXD = (this._boneData as SurfaceData).segmentX * 2; const lastIndex = this._vertices.length - 2; const lA = 200.0; // const raX = this._vertices[0]; const raY = this._vertices[1]; const rbX = this._vertices[segmentXD]; const rbY = this._vertices[segmentXD + 1]; const rcX = this._vertices[lastIndex]; const rcY = this._vertices[lastIndex + 1]; const rdX = this._vertices[lastIndex - segmentXD]; const rdY = this._vertices[lastIndex - segmentXD + 1]; // const dacX = raX + (rcX - raX) * 0.5; const dacY = raY + (rcY - raY) * 0.5; const dbdX = rbX + (rdX - rbX) * 0.5; const dbdY = rbY + (rdY - rbY) * 0.5; const aX = dacX + (dbdX - dacX) * 0.5; const 
aY = dacY + (dbdY - dacY) * 0.5; const bX = rbX + (rcX - rbX) * 0.5; const bY = rbY + (rcY - rbY) * 0.5; const cX = rdX + (rcX - rdX) * 0.5; const cY = rdY + (rcY - rdY) * 0.5; // this._globalDirty = false; this._getAffineTransform(0.0, 0.0, lA, lA, aX, aY, bX, bY, cX, cY, this.global, this.globalTransformMatrix, false); } public _getGlobalTransformMatrix(x: number, y: number): Matrix { const lB = 1000.0; if (x < -lB || lB < x || y < -lB || lB < y) { return this.globalTransformMatrix; } let isDown = false; const lA = 200.0; const surfaceData = this._boneData as SurfaceData; const segmentX = surfaceData.segmentX; const segmentY = surfaceData.segmentY; const segmentXD = surfaceData.segmentX * 2; const dX = this._dX; const dY = this._dY; const indexX = Math.floor((x + lA) / dX); const indexY = Math.floor((y + lA) / dY); let matrixIndex = 0; let pX = indexX * dX - lA; let pY = indexY * dY - lA; const matrices = this._matrixCahce; const helpMatrix = Surface._helpMatrix; if (x < -lA) { if (y < -lA || y > lA) { return this.globalTransformMatrix; } isDown = y > this._kX * (x + lA) + pY; matrixIndex = ((segmentX * (segmentY + 1) + segmentX * 2 + segmentY + indexY) * 2 + (isDown ? 1 : 0)) * 7; if (this._matrixCahce[matrixIndex] > 0.0) { helpMatrix.copyFromArray(matrices, matrixIndex + 1); } else { const vertexIndex = indexY * (segmentXD + 2); const ddX = this._hullCache[4]; const ddY = this._hullCache[5]; const sX = this._hullCache[2] - (segmentY - indexY) * ddX; const sY = this._hullCache[3] - (segmentY - indexY) * ddY; const vertices = this._vertices; if (isDown) { this._getAffineTransform( -lA, pY + dY, lB - lA, dY, vertices[vertexIndex + segmentXD + 2], vertices[vertexIndex + segmentXD + 3], sX + ddX, sY + ddY, vertices[vertexIndex], vertices[vertexIndex + 1], Surface._helpTransform, helpMatrix, true); } else { this._getAffineTransform( -lB, pY, lB - lA, dY, sX, sY, vertices[vertexIndex], vertices[vertexIndex + 1], sX + ddX, sY + ddY, Surface._helpTransform, helpMatrix, false); } matrices[matrixIndex] = 1.0; matrices[matrixIndex + 1] = helpMatrix.a; matrices[matrixIndex + 2] = helpMatrix.b; matrices[matrixIndex + 3] = helpMatrix.c; matrices[matrixIndex + 4] = helpMatrix.d; matrices[matrixIndex + 5] = helpMatrix.tx; matrices[matrixIndex + 6] = helpMatrix.ty; } } else if (x > lA) { if (y < -lA || y > lA) { return this.globalTransformMatrix; } isDown = y > this._kX * (x - lB) + pY; matrixIndex = ((segmentX * (segmentY + 1) + segmentX + indexY) * 2 + (isDown ? 
1 : 0)) * 7; if (this._matrixCahce[matrixIndex] > 0.0) { helpMatrix.copyFromArray(matrices, matrixIndex + 1); } else { const vertexIndex = (indexY + 1) * (segmentXD + 2) - 2; const ddX = this._hullCache[4]; const ddY = this._hullCache[5]; const sX = this._hullCache[0] + indexY * ddX; const sY = this._hullCache[1] + indexY * ddY; const vertices = this._vertices; if (isDown) { this._getAffineTransform( lB, pY + dY, lB - lA, dY, sX + ddX, sY + ddY, vertices[vertexIndex + segmentXD + 2], vertices[vertexIndex + segmentXD + 3], sX, sY, Surface._helpTransform, helpMatrix, true); } else { this._getAffineTransform( lA, pY, lB - lA, dY, vertices[vertexIndex], vertices[vertexIndex + 1], sX, sY, vertices[vertexIndex + segmentXD + 2], vertices[vertexIndex + segmentXD + 3], Surface._helpTransform, helpMatrix, false); } matrices[matrixIndex] = 1.0; matrices[matrixIndex + 1] = helpMatrix.a; matrices[matrixIndex + 2] = helpMatrix.b; matrices[matrixIndex + 3] = helpMatrix.c; matrices[matrixIndex + 4] = helpMatrix.d; matrices[matrixIndex + 5] = helpMatrix.tx; matrices[matrixIndex + 6] = helpMatrix.ty; } } else if (y < -lA) { if (x < -lA || x > lA) { return this.globalTransformMatrix; } isDown = y > this._kY * (x - pX - dX) - lB; matrixIndex = (segmentX * (segmentY + 1) + indexX * 2 + (isDown ? 1 : 0)) * 7; if (this._matrixCahce[matrixIndex] > 0.0) { helpMatrix.copyFromArray(matrices, matrixIndex + 1); } else { const vertexIndex = indexX * 2; const ddX = this._hullCache[10]; const ddY = this._hullCache[11]; const sX = this._hullCache[8] + indexX * ddX; const sY = this._hullCache[9] + indexX * ddY; const vertices = this._vertices; if (isDown) { this._getAffineTransform( pX + dX, -lA, dX, lB - lA, vertices[vertexIndex + 2], vertices[vertexIndex + 3], vertices[vertexIndex], vertices[vertexIndex + 1], sX + ddX, sY + ddY, Surface._helpTransform, helpMatrix, true); } else { this._getAffineTransform( pX, -lB, dX, lB - lA, sX, sY, sX + ddX, sY + ddY, vertices[vertexIndex], vertices[vertexIndex + 1], Surface._helpTransform, helpMatrix, false); } matrices[matrixIndex] = 1.0; matrices[matrixIndex + 1] = helpMatrix.a; matrices[matrixIndex + 2] = helpMatrix.b; matrices[matrixIndex + 3] = helpMatrix.c; matrices[matrixIndex + 4] = helpMatrix.d; matrices[matrixIndex + 5] = helpMatrix.tx; matrices[matrixIndex + 6] = helpMatrix.ty; } } else if (y > lA) { if (x < -lA || x > lA) { return this.globalTransformMatrix; } isDown = y > this._kY * (x - pX - dX) + lA; matrixIndex = ((segmentX * (segmentY + 1) + segmentX + segmentY + indexY) * 2 + (isDown ? 
1 : 0)) * 7; if (this._matrixCahce[matrixIndex] > 0.0) { helpMatrix.copyFromArray(matrices, matrixIndex + 1); } else { const vertexIndex = segmentY * (segmentXD + 2) + indexX * 2; const ddX = this._hullCache[10]; const ddY = this._hullCache[11]; const sX = this._hullCache[6] - (segmentX - indexX) * ddX; const sY = this._hullCache[7] - (segmentX - indexX) * ddY; const vertices = this._vertices; if (isDown) { this._getAffineTransform( pX + dX, lB, dX, lB - lA, sX + ddX, sY + ddY, sX, sY, vertices[vertexIndex + 2], vertices[vertexIndex + 3], Surface._helpTransform, helpMatrix, true); } else { this._getAffineTransform( pX, lA, dX, lB - lA, vertices[vertexIndex], vertices[vertexIndex + 1], vertices[vertexIndex + 2], vertices[vertexIndex + 3], sX, sY, Surface._helpTransform, helpMatrix, false); } matrices[matrixIndex] = 1.0; matrices[matrixIndex + 1] = helpMatrix.a; matrices[matrixIndex + 2] = helpMatrix.b; matrices[matrixIndex + 3] = helpMatrix.c; matrices[matrixIndex + 4] = helpMatrix.d; matrices[matrixIndex + 5] = helpMatrix.tx; matrices[matrixIndex + 6] = helpMatrix.ty; } } else { isDown = y > this._k * (x - pX - dX) + pY; matrixIndex = ((segmentX * indexY + indexX) * 2 + (isDown ? 1 : 0)) * 7; if (this._matrixCahce[matrixIndex] > 0.0) { helpMatrix.copyFromArray(matrices, matrixIndex + 1); } else { const vertexIndex = indexX * 2 + indexY * (segmentXD + 2); const vertices = this._vertices; if (isDown) { this._getAffineTransform( pX + dX, pY + dY, dX, dY, vertices[vertexIndex + segmentXD + 4], vertices[vertexIndex + segmentXD + 5], vertices[vertexIndex + segmentXD + 2], vertices[vertexIndex + segmentXD + 3], vertices[vertexIndex + 2], vertices[vertexIndex + 3], Surface._helpTransform, helpMatrix, true); } else { this._getAffineTransform( pX, pY, dX, dY, vertices[vertexIndex], vertices[vertexIndex + 1], vertices[vertexIndex + 2], vertices[vertexIndex + 3], vertices[vertexIndex + segmentXD + 2], vertices[vertexIndex + segmentXD + 3], Surface._helpTransform, helpMatrix, false); } matrices[matrixIndex] = 1.0; matrices[matrixIndex + 1] = helpMatrix.a; matrices[matrixIndex + 2] = helpMatrix.b; matrices[matrixIndex + 3] = helpMatrix.c; matrices[matrixIndex + 4] = helpMatrix.d; matrices[matrixIndex + 5] = helpMatrix.tx; matrices[matrixIndex + 6] = helpMatrix.ty; } } return helpMatrix; } /** * @internal * @private */ public init(surfaceData: SurfaceData, armatureValue: Armature): void { if (this._boneData !== null) { return; } super.init(surfaceData, armatureValue); const segmentX = surfaceData.segmentX; const segmentY = surfaceData.segmentY; const vertexCount = surfaceData.vertices.length; const lB = 1000.0; const lA = 200.0; // this._dX = lA * 2.0 / segmentX; this._dY = lA * 2.0 / segmentY; this._k = -this._dY / this._dX; this._kX = -this._dY / (lB - lA); this._kY = -(lB - lA) / this._dX; this._vertices.length = vertexCount; this._deformVertices.length = vertexCount; this._matrixCahce.length = (segmentX * segmentY + segmentX * 2 + segmentY * 2) * 2 * 7; this._hullCache.length = 10; for (let i = 0; i < vertexCount; ++i) { this._deformVertices[i] = 0.0; } } /** * @internal */ public update(cacheFrameIndex: number): void { this._blendState.dirty = false; if (cacheFrameIndex >= 0 && this._cachedFrameIndices !== null) { const cachedFrameIndex = this._cachedFrameIndices[cacheFrameIndex]; if (cachedFrameIndex >= 0 && this._cachedFrameIndex === cachedFrameIndex) { // Same cache. this._transformDirty = false; } else if (cachedFrameIndex >= 0) { // Has been Cached. 
this._transformDirty = true; this._cachedFrameIndex = cachedFrameIndex; } else { if (this._hasConstraint) { // Update constraints. for (const constraint of this._armature._constraints) { if (constraint._root === this) { constraint.update(); } } } if ( this._transformDirty || (this._parent !== null && this._parent._childrenTransformDirty) ) { // Dirty. this._transformDirty = true; this._cachedFrameIndex = -1; } else if (this._cachedFrameIndex >= 0) { // Same cache, but not set index yet. this._transformDirty = false; this._cachedFrameIndices[cacheFrameIndex] = this._cachedFrameIndex; } else { // Dirty. this._transformDirty = true; this._cachedFrameIndex = -1; } } } else { if (this._hasConstraint) { // Update constraints. for (const constraint of this._armature._constraints) { if (constraint._root === this) { constraint.update(); } } } if (this._transformDirty || (this._parent !== null && this._parent._childrenTransformDirty)) { // Dirty. cacheFrameIndex = -1; this._transformDirty = true; this._cachedFrameIndex = -1; } } if (this._transformDirty) { this._transformDirty = false; this._childrenTransformDirty = true; // for (let i = 0, l = this._matrixCahce.length; i < l; i += 7) { this._matrixCahce[i] = -1.0; } // this._updateVertices(); // if (this._cachedFrameIndex < 0) { const isCache = cacheFrameIndex >= 0; if (this._localDirty) { this._updateGlobalTransformMatrix(isCache); } if (isCache && this._cachedFrameIndices !== null) { this._cachedFrameIndex = this._cachedFrameIndices[cacheFrameIndex] = this._armature._armatureData.setCacheFrame(this.globalTransformMatrix, this.global); } } else { this._armature._armatureData.getCacheFrame(this.globalTransformMatrix, this.global, this._cachedFrameIndex); } // Update hull vertices. const lB = 1000.0; const lA = 200.0; const ddX = 2 * this.global.x; const ddY = 2 * this.global.y; // const helpPoint = Surface._helpPoint; this.globalTransformMatrix.transformPoint(lB, -lA, helpPoint); this._hullCache[0] = helpPoint.x; this._hullCache[1] = helpPoint.y; this._hullCache[2] = ddX - helpPoint.x; this._hullCache[3] = ddY - helpPoint.y; this.globalTransformMatrix.transformPoint(0.0, this._dY, helpPoint, true); this._hullCache[4] = helpPoint.x; this._hullCache[5] = helpPoint.y; // this.globalTransformMatrix.transformPoint(lA, lB, helpPoint); this._hullCache[6] = helpPoint.x; this._hullCache[7] = helpPoint.y; this._hullCache[8] = ddX - helpPoint.x; this._hullCache[9] = ddY - helpPoint.y; this.globalTransformMatrix.transformPoint(this._dX, 0.0, helpPoint, true); this._hullCache[10] = helpPoint.x; this._hullCache[11] = helpPoint.y; } else if (this._childrenTransformDirty) { this._childrenTransformDirty = false; } this._localDirty = true; } } }
Surface
error.ts
// Copyright (c) Microsoft Corporation. // Licensed under the MIT license. import { Assert, CommonError, StringUtils } from "../common"; import { Token } from "../language"; import { Localization, LocalizationUtils } from "../localization"; import { ParseState } from "./parseState"; export type TParseError = CommonError.CommonError | ParseError; export type TInnerParseError = | ExpectedAnyTokenKindError | ExpectedCsvContinuationError | ExpectedGeneralizedIdentifierError | ExpectedTokenKindError | InvalidPrimitiveTypeError | RequiredParameterAfterOptionalParameterError | UnterminatedSequence | UnusedTokensRemainError; export const enum CsvContinuationKind { DanglingComma = "DanglingComma", LetExpression = "LetExpression", } export const enum SequenceKind { Bracket = "Bracket", Parenthesis = "Parenthesis", } export class ParseError extends Error { constructor(readonly innerError: TInnerParseError, readonly state: ParseState) { super(innerError.message); Object.setPrototypeOf(this, ParseError.prototype); } } export class ExpectedCsvContinuationError extends Error { constructor( locale: string, readonly kind: CsvContinuationKind, readonly maybeFoundToken: TokenWithColumnNumber | undefined, ) { super(Localization.error_parse_csvContinuation(LocalizationUtils.getLocalizationTemplates(locale), kind)); Object.setPrototypeOf(this, ExpectedCsvContinuationError.prototype); } } export class
extends Error { constructor( locale: string, readonly expectedAnyTokenKinds: ReadonlyArray<Token.TokenKind>, readonly maybeFoundToken: TokenWithColumnNumber | undefined, ) { super( Localization.error_parse_expectAnyTokenKind( LocalizationUtils.getLocalizationTemplates(locale), expectedAnyTokenKinds, maybeFoundToken, ), ); Object.setPrototypeOf(this, ExpectedAnyTokenKindError.prototype); } } export class ExpectedTokenKindError extends Error { constructor( locale: string, readonly expectedTokenKind: Token.TokenKind, readonly maybeFoundToken: TokenWithColumnNumber | undefined, ) { super( Localization.error_parse_expectTokenKind( LocalizationUtils.getLocalizationTemplates(locale), expectedTokenKind, maybeFoundToken, ), ); Object.setPrototypeOf(this, ExpectedTokenKindError.prototype); } } export class ExpectedGeneralizedIdentifierError extends Error { constructor(locale: string, readonly maybeFoundToken: TokenWithColumnNumber | undefined) { super( Localization.error_parse_expectGeneralizedIdentifier( LocalizationUtils.getLocalizationTemplates(locale), maybeFoundToken, ), ); Object.setPrototypeOf(this, ExpectedGeneralizedIdentifierError.prototype); } } export class InvalidPrimitiveTypeError extends Error { constructor(locale: string, readonly token: Token.Token, readonly positionStart: StringUtils.GraphemePosition) { super(Localization.error_parse_invalidPrimitiveType(LocalizationUtils.getLocalizationTemplates(locale), token)); Object.setPrototypeOf(this, InvalidPrimitiveTypeError.prototype); } } export class RequiredParameterAfterOptionalParameterError extends Error { constructor( locale: string, readonly missingOptionalToken: Token.Token, readonly positionStart: StringUtils.GraphemePosition, ) { super( Localization.error_parse_requiredParameterAfterOptional(LocalizationUtils.getLocalizationTemplates(locale)), ); Object.setPrototypeOf(this, RequiredParameterAfterOptionalParameterError.prototype); } } export class UnterminatedSequence extends Error { constructor( locale: string, readonly kind: SequenceKind, readonly startToken: Token.Token, readonly positionStart: StringUtils.GraphemePosition, ) { super(Localization.error_parse_unterminated_sequence(LocalizationUtils.getLocalizationTemplates(locale), kind)); Object.setPrototypeOf(this, UnterminatedSequence.prototype); } } export class UnusedTokensRemainError extends Error { constructor( locale: string, readonly firstUnusedToken: Token.Token, readonly positionStart: StringUtils.GraphemePosition, ) { super(Localization.error_parse_unusedTokens(LocalizationUtils.getLocalizationTemplates(locale))); Object.setPrototypeOf(this, UnusedTokensRemainError.prototype); } } export interface TokenWithColumnNumber { readonly token: Token.Token; readonly columnNumber: number; } export function assertIsParseError(error: any): error is ParseError { Assert.isTrue(isParseError(error), "isParseError(error)"); return true; } export function isParseError(error: any): error is ParseError { return error instanceof ParseError; } export function isTParseError(error: any): error is TParseError { return isParseError(error) || CommonError.isCommonError(error); } export function isTInnerParseError(x: any): x is TInnerParseError { return ( x instanceof ExpectedAnyTokenKindError || x instanceof ExpectedCsvContinuationError || x instanceof ExpectedGeneralizedIdentifierError || x instanceof ExpectedTokenKindError || x instanceof InvalidPrimitiveTypeError || x instanceof RequiredParameterAfterOptionalParameterError || x instanceof UnterminatedSequence || x instanceof 
UnusedTokensRemainError ); } export function maybeTokenFrom(error: TInnerParseError): Token.Token | undefined { if ( (error instanceof ExpectedAnyTokenKindError || error instanceof ExpectedCsvContinuationError || error instanceof ExpectedGeneralizedIdentifierError || error instanceof ExpectedTokenKindError) && error.maybeFoundToken ) { return error.maybeFoundToken.token; } else if (error instanceof InvalidPrimitiveTypeError) { return error.token; } else if (error instanceof RequiredParameterAfterOptionalParameterError) { return error.missingOptionalToken; } else if (error instanceof UnterminatedSequence) { return error.startToken; } else if (error instanceof UnusedTokensRemainError) { return error.firstUnusedToken; } else { return undefined; } }
ExpectedAnyTokenKindError
0006_auto_20170523_1706.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-05-23 09:06 from __future__ import unicode_literals from django.db import migrations, models class
(migrations.Migration): dependencies = [ ('filemonitor', '0005_auto_20170523_1541'), ] operations = [ migrations.RenameField( model_name='actualfile', old_name='status', new_name='ckstatus', ), migrations.AddField( model_name='actualfile', name='dlstatus', field=models.IntegerField(default=0), ), migrations.AlterField( model_name='actualfile', name='remark', field=models.TextField(default=''), ), ]
Migration
installGKE.go
// Copyright © 2019 NAME HERE <EMAIL ADDRESS> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import (
keptnutils "github.com/keptn/go-utils/pkg/utils" ) type gkeCredentials struct { ClusterName string `json:"clusterName"` ClusterZone string `json:"clusterZone"` GkeProject string `json:"gkeProject"` } type gkePlatform struct { creds *gkeCredentials } func newGKEPlatform() *gkePlatform { return &gkePlatform{ creds: &gkeCredentials{}, } } func (p gkePlatform) getCreds() interface{} { return p.creds } func (p gkePlatform) checkRequirements() error { _, err := getGcloudUser() return err } func (p gkePlatform) checkCreds() error { if p.creds.ClusterName == "" || p.creds.ClusterZone == "" { return errors.New("Incomplete credentials") } authenticated, err := p.authenticateAtCluster() if err != nil { return err } if !authenticated { return errors.New("Cannot authenticate at cluster " + p.creds.ClusterName) } return nil } func (p gkePlatform) readCreds() { if p.creds.ClusterName == "" || p.creds.ClusterZone == "" || p.creds.GkeProject == "" { p.creds.ClusterName, p.creds.ClusterZone, p.creds.GkeProject = getGkeClusterInfo() } connectionSuccessful := false for !connectionSuccessful { p.readClusterName() p.readClusterZone() p.readGkeProject() connectionSuccessful, _ = p.authenticateAtCluster() } } func (p gkePlatform) readClusterName() { readUserInput(&p.creds.ClusterName, "^(([a-zA-Z0-9]+-)*[a-zA-Z0-9]+)$", "Cluster Name", "Please enter a valid Cluster Name.", ) } func (p gkePlatform) readClusterZone() { readUserInput(&p.creds.ClusterZone, "^(([a-zA-Z0-9]+-)*[a-zA-Z0-9]+)$", "Cluster Zone", "Please enter a valid Cluster Zone.", ) } func (p gkePlatform) readGkeProject() { readUserInput(&p.creds.GkeProject, "^(([a-zA-Z0-9]+-)*[a-zA-Z0-9]+)$", "GKE Project", "Please enter a valid GKE Project.", ) } func (p gkePlatform) authenticateAtCluster() (bool, error) { _, err := keptnutils.ExecuteCommand("gcloud", []string{ "container", "clusters", "get-credentials", p.creds.ClusterName, "--zone", p.creds.ClusterZone, "--project", p.creds.GkeProject, }) if err != nil { fmt.Println("Could not connect to cluster. " + "Please verify that you have entered the correct information. Error: " + err.Error()) return false, err } return true, nil } func getGkeClusterInfo() (string, string, string) { // try to get current cluster from kubectl config out, err := getKubeContext() if err != nil { return "", "", "" } clusterInfo := strings.TrimSpace(strings.Replace(string(out), "\r\n", "\n", -1)) if !strings.HasPrefix(clusterInfo, gke) { return "", "", "" } clusterInfoArray := strings.Split(clusterInfo, "_") if len(clusterInfoArray) < 4 { return "", "", "" } return clusterInfoArray[3], clusterInfoArray[2], clusterInfoArray[1] } func getGcloudUser() (string, error) { out, err := keptnutils.ExecuteCommand("gcloud", []string{ "config", "get-value", "account", }) if err != nil { return "", fmt.Errorf("Please configure your gcloud: %s", err) } // This command returns the account in the first line return strings.Split(strings.Replace(string(out), "\r\n", "\n", -1), "\n")[0], nil } func (p gkePlatform) printCreds() { fmt.Println("Cluster Name: " + p.creds.ClusterName) fmt.Println("Cluster Zone: " + p.creds.ClusterZone) fmt.Println("GKE Project: " + p.creds.GkeProject) }
"errors" "fmt" "strings"
config_20200117182523.js
'use strict'; module.exports = { url: 'https://lumen.netlify.com', pathPrefix: '/', title: 'Lock Groove', subtitle: 'Pellentesque odio nisi, euismod in, pharetra a, ultricies in, diam. Sed arcu.', copyright: '© All rights reserved.',
disqusShortname: '', postsPerPage: 4, googleAnalyticsId: 'UA-73379983-2', useKatex: false, menu: [ { label: 'Articles', path: '/' }, { label: 'About me', path: '/pages/about' }, { label: 'Contact me', path: '/pages/contacts' } ], author: { name: 'Todd P.', photo: '/photo.jpg', bio: 'Blogging about dev and the unground lifestyle with Gatsby!', contacts: { email: '', facebook: '#', telegram: '#', twitter: 'tnpete', github: 'tddpt', rss: '', vkontakte: '', linkedin: '#', instagram: '#', line: '', gitlab: '', weibo: '', codepen: '', youtube: '', soundcloud: '', } } };
toindex-byteoffset.js
// Copyright (C) 2016 the V8 project authors. All rights reserved. // This code is governed by the BSD license found in the LICENSE file. /*--- esid: sec-dataview.prototype.setfloat64 es6id: 24.2.4.14 description: > ToIndex conversions on byteOffset info: | 24.2.4.14 DataView.prototype.setFloat64 ( byteOffset, value [ , littleEndian ] ) 1. Let v be the this value. 2. If littleEndian is not present, let littleEndian be false. 3. Return ? SetViewValue(v, byteOffset, littleEndian, "Float64", value). 24.2.1.2 SetViewValue ( view, requestIndex, isLittleEndian, type, value ) ... 4. Let getIndex be ? ToIndex(requestIndex). ... features: [DataView.prototype.getFloat64] ---*/ var buffer = new ArrayBuffer(12); var sample = new DataView(buffer, 0); var obj1 = { valueOf: function() { return 3; } }; var obj2 = { toString: function() { return 4; } }; sample.setFloat64(0, 0); sample.setFloat64(-0, 42); assert.sameValue(sample.getFloat64(0), 42, "-0"); sample.setFloat64(3, 0); sample.setFloat64(obj1, 42); assert.sameValue(sample.getFloat64(3), 42, "object's valueOf"); sample.setFloat64(4, 0); sample.setFloat64(obj2, 42); assert.sameValue(sample.getFloat64(4), 42, "object's toString"); sample.setFloat64(0, 0); sample.setFloat64("", 42); assert.sameValue(sample.getFloat64(0), 42, "the Empty string"); sample.setFloat64(0, 0); sample.setFloat64("0", 42); assert.sameValue(sample.getFloat64(0), 42, "string '0'"); sample.setFloat64(2, 0); sample.setFloat64("2", 42); assert.sameValue(sample.getFloat64(2), 42, "string '2'"); sample.setFloat64(1, 0); sample.setFloat64(true, 42); assert.sameValue(sample.getFloat64(1), 42, "true"); sample.setFloat64(0, 0); sample.setFloat64(false, 42); assert.sameValue(sample.getFloat64(0), 42, "false"); sample.setFloat64(0, 0); sample.setFloat64(NaN, 42); assert.sameValue(sample.getFloat64(0), 42, "NaN"); sample.setFloat64(0, 0); sample.setFloat64(null, 42); assert.sameValue(sample.getFloat64(0), 42, "null"); sample.setFloat64(0, 0); sample.setFloat64(0.1, 42); assert.sameValue(sample.getFloat64(0), 42, "0.1"); sample.setFloat64(0, 0); sample.setFloat64(0.9, 42); assert.sameValue(sample.getFloat64(0), 42, "0.9");
sample.setFloat64(1, 0); sample.setFloat64(1.9, 42); assert.sameValue(sample.getFloat64(1), 42, "1.9"); sample.setFloat64(0, 0); sample.setFloat64(-0.1, 42); assert.sameValue(sample.getFloat64(0), 42, "-0.1"); sample.setFloat64(0, 0); sample.setFloat64(-0.99999, 42); assert.sameValue(sample.getFloat64(0), 42, "-0.99999"); sample.setFloat64(0, 0); sample.setFloat64(undefined, 42); assert.sameValue(sample.getFloat64(0), 42, "undefined"); sample.setFloat64(0, 7); sample.setFloat64(); assert.sameValue(sample.getFloat64(0), NaN, "no arg");
sample.setFloat64(1, 0); sample.setFloat64(1.1, 42); assert.sameValue(sample.getFloat64(1), 42, "1.1");
sse2.go
package codegen import ( "fmt" "reflect" "strconv" "strings" "golang.org/x/tools/go/ssa" "github.com/bjwbell/gensimd/simd" ) var sse2intructions = []Intrinsic{ { Name: "AddEpi64", InstructionName: "PADDQ", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "SubEpi64", InstructionName: "PSUBQ", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "MulEpu32", InstructionName: "PMULULQ", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "ShufflehiEpi16", InstructionName: "PSHUFHW", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "ShuffleloEpi16", InstructionName: "PSHUFLW", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "ShuffleEpi32", InstructionName: "PSHUFD", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "SlliSi128", InstructionName: "PSLLO", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: Imm8, Reg: INVALID_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "SrliSi128", InstructionName: "PSRLO", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "UnpackhiEpi64", InstructionName: "PUNPCKHLQ", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: Imm8, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "UnpackloEpi64", InstructionName: "PUNPCKLLQ", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: Imm8, Reg: INVALID_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128i{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "AddPd", InstructionName: "ADDPD", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128d{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128d{})}, Op: 
Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "AddSd", InstructionName: "ADDSD", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "AndnotPd", InstructionName: "ANDNPD", VarOps: []VarOp{ { Var: Variable{reflect.TypeOf(simd.M128d{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128d{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "CmpeqPd", InstructionName: "CMPPD", VarOps: []VarOp{ { Var: Variable{}, Const: 0, Op: Operand{Flags: Imm8, Reg: INVALID_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 1, }, { Name: "CmpeqSd", InstructionName: "CMPPD", VarOps: []VarOp{ { Var: Variable{}, Const: 0, Op: Operand{Flags: Imm8, Reg: INVALID_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128{})}, Op: Operand{Flags: In, Reg: XMM_REG, NamedReg: REG_INVALID}}, { Var: Variable{reflect.TypeOf(simd.M128{})}, Op: Operand{Flags: In | Out, Reg: XMM_REG, NamedReg: REG_INVALID}, }, }, ResultIdxOp: 2, }, } func getSSE2(name string) (Intrinsic, bool) { for _, sse2instr := range sse2intructions { if sse2instr.Name == name { return sse2instr, true } } return Intrinsic{}, false } func
(f *Function, loc ssa.Instruction, call *ssa.Call, intrinsic Intrinsic, args []ssa.Value) string { var asm string idents := []*identifier{} argRegs := []*register{} result := f.Ident(call) asm += fmt.Sprintf("// BEGIN INTRINSIC %v\n", intrinsic.Name) asm += fmt.Sprintf("// BEGIN LOAD ARGS %v\n", intrinsic.Name) // load the arguments into registers for _, arg := range args { ident := f.Ident(arg) a, reg, err := f.LoadIdent(loc, ident, 0, ident.size()) if err != nil { ice(fmt.Sprintf("%v", err.Err)) } reg.inUse = true asm += a idents = append(idents, ident) argRegs = append(argRegs, reg) } asm += fmt.Sprintf("// END LOAD ARGS %v\n", intrinsic.Name) // construct the assembly instruction var regResult *register argIdx := 0 assembly := fmt.Sprintf("%-9v ", intrinsic.InstructionName) for i, varop := range intrinsic.VarOps { ident := idents[argIdx] reg := argRegs[argIdx] flags := varop.Op.Flags if flags&Out != 0 { if i == intrinsic.ResultIdxOp { regResult = reg } else { reg.dirty = true } } if flags&Implicit != 0 { continue } if flags&Const != 0 { if flags&Imm8 != 0 || flags&Imm32 != 0 { asm += "$" + strconv.Itoa(varop.Const) } else if flags&ImmF32 != 0 || flags&ImmF64 != 0 { asm += ice("unimplemented case") } else { ice("unexpected case") } continue } if flags&In == 0 && flags&Out == 0 { continue } if reflectType(args[argIdx].Type()).String() != varop.Var.VarType.String() { got := reflectType(args[argIdx].Type()).String() expected := varop.Var.VarType.String() msg := fmt.Sprintf("wrong type for argument, Got: %v, Expected: %v", got, expected) ice(msg) } if flags&In != 0 { if flags&Imm8 != 0 || flags&Imm32 != 0 { if ident.cnst == nil { panic("Provided argument must be a constant") } else { assembly += "$" + strconv.Itoa(int(ident.cnst.Uint64())) } } else if flags&ImmF32 != 0 || flags&ImmF64 != 0 { ice("unimplemented case") } assembly += reg.name + ", " argIdx++ } } assembly = strings.TrimSuffix(assembly, ", ") + "\n" asm += assembly if regResult != nil { c, err := f.StoreSSE2(loc, regResult, result) if err != nil { ice(fmt.Sprint(err.Err)) } asm += c } else { ice("no result from intrinsic") } for _, reg := range argRegs { f.freeReg(reg) } asm += fmt.Sprintf("// END INTRINSIC %v\n", intrinsic.Name) return asm }
sse2Intrinsic
bootstrap.js
import React from "react"; import ReactDOM from "react-dom"; import { createMemoryHistory, createBrowserHistory } from "history"; import App from "./App"; const mount = (el, { onSignIn, onNavigate, defaultHistory, initialPath }) => { const historyFn = defaultHistory || createMemoryHistory; const history = historyFn({ initialEntries: [initialPath], }); history.listen(onNavigate); ReactDOM.render(<App history={history} onSignIn={onSignIn} />, el); return { onParentNavigate({ pathname: nextPathname }) { const { pathname } = history.location; if (pathname !== nextPathname) { history.push(nextPathname); } }, };
}; if (process.env.NODE_ENV === "development") { const el = document.getElementById("_auth-dev-root"); if (el) { mount(el, { defaultHistory: createBrowserHistory }); } } export { mount };
misc_robinson.py
""" Robinson ======== The Robinson projection, presented by the American geographer and cartographer Arthur H. Robinson in 1963, is a modified cylindrical projection that is neither conformal nor equal-area. Central meridian and all parallels are straight lines; other meridians are curved. It uses lookup tables rather than analytic expressions to make the world map "look" right 22. The scale is true along latitudes 38. The projection was originally developed for use by Rand McNally and is currently used by the National Geographic Society. **n**\ [*lon0/*]\ *scale* or **N**\ [*lon0/*]\ *width* The projection is set with **n** or **N**. The central meridian is set with the optional *lon0*, and the figure size is set with *scale* or *width*.
# Use region "d" to specify global region (-180/180/-90/90) fig.coast(region="d", projection="N12c", land="goldenrod", water="snow2", frame="afg") fig.show()
""" import pygmt fig = pygmt.Figure()
support.py
''' Module responsible for running the --support option for collecting debug information ''' import logging import shlex import re import os import requests import tempfile import time import subprocess from insights import get_nvr from subprocess import Popen, PIPE, STDOUT from constants import InsightsConstants as constants from connection import InsightsConnection from config import CONFIG as config APP_NAME = constants.app_name logger = logging.getLogger(__name__) def registration_check(pconn): # check local registration record unreg_date = None unreachable = False if os.path.isfile(constants.registered_file): local_record = 'System is registered locally via .registered file.' with open(constants.registered_file) as reg_file: local_record += ' Registered at ' + reg_file.readline() else: local_record = 'System is NOT registered locally via .registered file.' if os.path.isfile(constants.unregistered_file): with open(constants.unregistered_file) as reg_file: local_record += ' Unregistered at ' + reg_file.readline() api_reg_status = pconn.api_registration_check() logger.debug('Registration status: %s', api_reg_status) if type(api_reg_status) is bool: if api_reg_status: api_record = 'Insights API confirms registration.' else: api_record = 'Insights API could not be reached to confirm registration status.' unreachable = True elif api_reg_status is None: api_record = 'Insights API says this machine is NOT registered.' api_reg_status = False else:
return {'messages': [local_record, api_record], 'status': api_reg_status, 'unreg_date': unreg_date, 'unreachable': unreachable} class InsightsSupport(object): ''' Build the support logfile ''' def __init__(self): pass def collect_support_info(self): logger.info('Collecting logs...') self._support_diag_dump() logger.info('Copying Insights logs to archive...') log_archive_dir = tempfile.mkdtemp(prefix='/var/tmp/') tar_file = os.path.join(log_archive_dir, 'insights-client-logs-' + time.strftime('%Y%m%d%H%M%S') + '.tar.gz') tar_cmd = 'tar czfS {0} -C {1} .'.format( tar_file, constants.log_dir) subprocess.call(shlex.split(tar_cmd), stderr=subprocess.PIPE) logger.info('Support information collected in %s', tar_file) def _support_diag_dump(self): ''' Collect log info for debug ''' # check insights config cfg_block = [] pconn = InsightsConnection() logger.info('Insights version: %s', get_nvr()) reg_check = registration_check(pconn) cfg_block.append('Registration check:') for key in reg_check: cfg_block.append(key + ': ' + str(reg_check[key])) lastupload = 'never' if os.path.isfile(constants.lastupload_file): with open(constants.lastupload_file) as upl_file: lastupload = upl_file.readline().strip() cfg_block.append('\nLast successful upload was ' + lastupload) cfg_block.append('auto_config: ' + str(config['auto_config'])) if config['proxy']: obfuscated_proxy = re.sub(r'(.*)(:)(.*)(@.*)', r'\1\2********\4', config['proxy']) else: obfuscated_proxy = 'None' cfg_block.append('proxy: ' + obfuscated_proxy) logger.info('\n'.join(cfg_block)) logger.info('python-requests: %s', requests.__version__) succ = pconn.test_connection() if succ == 0: logger.info('Connection test: PASS\n') else: logger.info('Connection test: FAIL\n') # run commands commands = ['uname -a', 'cat /etc/redhat-release', 'env', 'sestatus', 'subscription-manager identity', 'systemctl cat insights-client.timer', 'systemctl cat insights-client.service', 'systemctl status insights-client.timer', 'systemctl status insights-client.service'] for cmd in commands: logger.info("Running command: %s", cmd) try: proc = Popen( shlex.split(cmd), shell=False, stdout=PIPE, stderr=STDOUT, close_fds=True) stdout, stderr = proc.communicate() except OSError as o: if 'systemctl' not in cmd: # suppress output for systemctl cmd failures logger.info('Error running command "%s": %s', cmd, o) except Exception as e: # unknown error logger.info("Process failed: %s", e) logger.info("Process output: \n%s", stdout) # check available disk space for /var/tmp tmp_dir = '/var/tmp' dest_dir_stat = os.statvfs(tmp_dir) dest_dir_size = (dest_dir_stat.f_bavail * dest_dir_stat.f_frsize) logger.info('Available space in %s:\t%s bytes\t%.1f 1K-blocks\t%.1f MB', tmp_dir, dest_dir_size, dest_dir_size / 1024.0, (dest_dir_size / 1024.0) / 1024.0)
api_record = 'Insights API says this machine was unregistered at ' + api_reg_status unreg_date = api_reg_status api_reg_status = False
unified.js
const redisCfg = require('./redis') module.exports = { settings: { rpcTimeout: 10000, _unified: true }, options: { apigateway: { namespace: process.env.IIOS_NAMESPACE || 'ignitialio', /* calling timeout for pub/sub mode */ timeout: 5000, connector: { redis: { encoder: 'bson', sentinels: redisCfg.REDIS_SENTINELS, host: redisCfg.REDIS_HOST, port: redisCfg.REDIS_PORT,
}, myunified: { someConfiguration: 'that\'s all folks !' } } }
db: redisCfg.REDIS_DB } }
helper.service.ts
import { ElementRef, Injectable } from '@angular/core'; // @Injectable({ // providedIn: 'root' // }) @Injectable() export class
{ themeColors = ['default','primary','secondary','success','error','warning','base']; defaultColor = this.themeColors[0]; constructor() { } /** * checks if the given theme color is a valid theme color; if invalid, returns the default theme color * @param color theme color to be validated * @returns a valid theme color, or 'default' if invalid */ sanitizeThemeColor(color:string):string{ return this.themeColors.indexOf(color.toLowerCase()) > -1 ? color.toLowerCase() : this.defaultColor; } /** * returns the css color variable for the theme color (default, primary, ...), otherwise returns the same value * @param color color to be sanitized * @returns sanitized color */ sanitizeColor(color:string):string{ let colorLC = color ? color.toLowerCase() : ''; let themeColor = this.themeColors.find((tc)=>{ return colorLC.indexOf(tc) >=0; }); //return themeColor ? `var(--ngw-${colorLC})` : colorLC; return themeColor ? `var(--ngw-theme-${color})` : color; } /** * Applies the given style property to the given element. * @param el Element for which style needs to be applied * @param propertyName style property that needs to be set * @param propertyValue style value that needs to be set */ applyStyle(el:ElementRef,propertyName:string,propertyValue:string){ el.nativeElement.style[propertyName] = propertyValue; } /** * Sanitizes and applies the given style property to the given element. * @param el Element for which style needs to be applied * @param propertyName style property that needs to be set * @param propertyValue style value that needs to be set */ sanitizeAndApplyStyle(el:ElementRef,propertyName:string,propertyValue:string){ el.nativeElement.style[propertyName] = this.sanitizeColor(propertyValue); } /** * Extracts and returns a CSS style object for the given css properties from the given HTML Element * @param el Element from which styles need to be extracted * @param props Style properties to be extracted * @returns style object for given style properties */ getStyleObject(el:ElementRef,props:string[]){ let styleObj = {}; props.forEach((prop,i)=>{ styleObj[prop] = el.nativeElement.style[prop]; }); return styleObj; } /** * Sets the style of the HTML element with the given style * @param el HTML element for which style needs to be set * @param props set of CSS style properties * @param styleObj CSS styles with values to be set */ setStyleObject(el:ElementRef,props:string[],styleObj:any){ props.forEach((prop,i)=>{ el.nativeElement.style[prop] = styleObj[prop]; }); } sanitizeShadowValue(shadow:string):string{ let shadowToApply; if(shadow =="1"){ shadowToApply = '0 2px 1px -1px rgb(0 0 0 / 20%), 0 1px 1px 0 rgb(0 0 0 / 14%), 0 1px 3px 0 rgb(0 0 0 / 12%)'; }else if(shadow =="2"){ shadowToApply = '0 3px 1px -2px rgb(0 0 0 / 20%), 0 2px 2px 0 rgb(0 0 0 / 14%), 0 1px 5px 0 rgb(0 0 0 / 12%)'; }else if(shadow =="3"){ shadowToApply = '0 3px 3px -2px rgb(0 0 0 / 20%), 0 3px 4px 0 rgb(0 0 0 / 14%), 0 1px 8px 0 rgb(0 0 0 / 12%)'; }else if(shadow =="4"){ shadowToApply = '0 2px 4px -1px rgb(0 0 0 / 20%), 0 4px 5px 0 rgb(0 0 0 / 14%), 0 1px 10px 0 rgb(0 0 0 / 12%)'; }else if(shadow =="5"){ shadowToApply = '0 3px 5px -1px rgb(0 0 0 / 20%), 0 5px 8px 0 rgb(0 0 0 / 14%), 0 1px 14px 0 rgb(0 0 0 / 12%)'; }else{ shadowToApply = shadow } return shadowToApply; } }
HelperService
evaluate.py
from game2048.game import Game from game2048.displays import Display import os os.environ["CUDA_VISIBLE_DEVICES"]="1" # import timer def
(size, score_to_win, AgentClass, **kwargs): game = Game(size, score_to_win) agent = AgentClass(game, display = Display(), **kwargs) agent.play(verbose=True) return game.score if __name__ == '__main__': GAME_SIZE = 4 SCORE_TO_WIN = 2048 N_TESTS = 50 '''==================== Use your own agent here.''' from game2048.agents import MyOwnAgent as TestAgent '''====================''' scores = [] for i in range(N_TESTS): score = single_run(GAME_SIZE, SCORE_TO_WIN, AgentClass=TestAgent) scores.append(score) print(i) # print timer print("Average scores: @%s times" % N_TESTS, sum(scores) / len(scores))
single_run
basic02.py
""" __new__()方法, 对象创建的过程, 1- new方法返回一个对象 2- init利用new返回的对象进行属性的添加 """ class Person(object): # 监听创建一个实例对象的过程,需要返回一个对象赋值给xiaoming # new中不return的话,那么久不会执行init方法 def __new__(cls, *args, **kwargs): print("new")
print((object.__new__(cls))) return object.__new__(cls) # Constructor: by the time __init__ runs, the object has **already been created**; all that remains is adding attributes to it def __init__(self, name): print("init") self.name = name # The class's toString method # def __str__(self): # return "My name is: %s" % self.name # Hooks the moment the reference count drops to 0; Python then calls __del__ def __del__(self): print("Goodbye") # xiaoming's address is the same as the address of the obj returned in __new__, so the obj returned by __new__ is xiaoming xiaoming = Person("小明") print(xiaoming) print("=" * 28) """ Python's singleton pattern relies on the __new__ method: 1- Ensure the same object is always returned, handled in __new__ 2- Ensure the attributes are assigned only once, handled in __init__ 3- Singletons usually contain static methods, like Tools.XX; there is no need to create multiple objects to call the same static method """ class Student(object): # Class attribute that stores the instance object __instance = None # Class attribute that ensures instance attributes are assigned only once __is_first = True # s1 and s2 must share one piece of memory, so __new__ must return the same object def __new__(cls, *args, **kwargs): if cls.__instance is None: cls.__instance = object.__new__(cls) return cls.__instance def __init__(self, name, age): if self.__is_first: self.name = name self.age = age self.__is_first = False # Static method @staticmethod def add_num(a, b): return a + b s1 = Student("小明", 25) s2 = Student("小红", 28) print(s1) print(s2) print(s1.name) print(s2.name)
directory_view.rs
//! Lists the contents of a single directory. //! //! Reacts to events for selection of one or more files, de-selection, deletion and //! double-clicking. use { Borderable, color, Color, Colorable, FontSize, Labelable, Positionable, Sizeable, Scalar, Widget, }; use event; use std; use widget; use std::cmp::Ordering; /// For viewing, selecting, double-clicking, etc the contents of a directory. #[derive(WidgetCommon_)] pub struct DirectoryView<'a> { #[conrod(common_builder)] common: widget::CommonBuilder, /// Unique styling for the widget. pub style: Style, /// The path of the directory to display. pub directory: &'a std::path::Path, /// Only display files of the given type. pub types: super::Types<'a>, // Whether or not hidden files and directories will be shown. show_hidden: bool, } /// Unique state stored within the widget graph for each `FileNavigator`. pub struct State { /// The absolute path, `Rectangle` and `Text` indices for each file in the directory. entries: Vec<Entry>, /// The absolute path to the directory. directory: std::path::PathBuf, /// The `DirectoryView`'s children widgets: /// /// - The background color for the directory view. /// - The index used to instantiate the `ListSelect` widget. ids: Ids, } /// Data stored for each `File` in the `State`. #[derive(Clone, Debug, PartialEq)] pub struct Entry { path: std::path::PathBuf, is_selected: bool, } widget_ids! { struct Ids { rectangle, list_select, } } /// Unique styling for the widget. #[derive(Copy, Clone, Debug, Default, PartialEq, WidgetStyle_)] pub struct Style { /// Color of the selected entries. #[conrod(default = "theme.shape_color")] pub color: Option<Color>, /// The color of the unselected entries. #[conrod(default = "None")] pub unselected_color: Option<Option<Color>>, /// The color of the directory and file names. #[conrod(default = "None")] pub text_color: Option<Option<Color>>, /// The font size for the directory and file names. #[conrod(default = "theme.font_size_medium")] pub font_size: Option<FontSize>, } /// The kinds of `Event`s produced by the `DirectoryView`. #[derive(Clone)] pub enum Event { /// Some change in the `Selection` occurred. This represents the new full selection. Selection(Vec<std::path::PathBuf>), /// One or more entries have been double clicked. Click(event::Click, Vec<std::path::PathBuf>), /// One or more entries have been double clicked. DoubleClick(event::DoubleClick, Vec<std::path::PathBuf>), /// A `Press` event occurred while the given entries were selected. Press(event::Press, Vec<std::path::PathBuf>), /// A `Release` event occurred while the given entries were selected. Release(event::Release, Vec<std::path::PathBuf>), } #[cfg(all(target_os = "windows", not(feature = "windows_metadataext")))] fn is_file_hidden(_path: &std::path::PathBuf) -> bool { false } #[cfg(all(target_os = "windows", feature = "windows_metadataext"))] /// Check if a file is hidden on windows, using the file attributes. /// To be enabled once windows::fs::MetadataExt is no longer an unstable API. fn is_file_hidden(path: &std::path::PathBuf) -> bool { use std::os::windows::fs::MetadataExt; const FILE_ATTRIBUTE_HIDDEN: u32 = 0x2; let metadata = std::fs::metadata(&path).ok(); if let Some(metadata) = metadata { let win_attr: u32 = metadata.file_attributes(); return (win_attr & FILE_ATTRIBUTE_HIDDEN) != 0; } false } #[cfg(not(target_os = "windows"))] /// Check if a file is hidden on any other OS than windows, using the dot file namings. 
fn is_file_hidden(path: &std::path::PathBuf) -> bool { let name = path.file_name(); if let Some(name) = name { return name.to_string_lossy().starts_with("."); } false } /// Returns true if file or directory should be displayed depending on configuration /// and file status (hidden or not) and extension (matching or not) fn check_hidden(show_hidden: bool, types: super::Types, path: &std::path::PathBuf) -> bool { // Reject hidden files or directories if is_file_hidden(path) && !show_hidden { return false } match types { super::Types::All => return true, super::Types::WithExtension(valid_exts) => { // We only filter files by extension if path.is_dir() { return true } // Check for valid extensions. let ext = path.extension() .and_then(|ext| ext.to_str()) .map(|s| std::ascii::AsciiExt::to_ascii_lowercase(s)) .unwrap_or_else(String::new); if valid_exts.iter().any(|&valid_ext| &ext == valid_ext) { return true } else {
} }, super::Types::Directories => return path.is_dir(), } } impl<'a> DirectoryView<'a> { /// Begin building a `DirectoryNavigator` widget that displays only files of the given types. pub fn new(directory: &'a std::path::Path, types: super::Types<'a>) -> Self { DirectoryView { common: widget::CommonBuilder::default(), style: Style::default(), directory: directory, types: types, show_hidden: false, } } /// The color of the unselected entries within each `DirectoryView`. pub fn unselected_color(mut self, color: Color) -> Self { self.style.unselected_color = Some(Some(color)); self } /// The color of the `Text` used to display the file names. pub fn text_color(mut self, color: Color) -> Self { self.style.text_color = Some(Some(color)); self } /// Whether to show hidden files and directories pub fn show_hidden_files(mut self, show_hidden: bool) -> Self { self.show_hidden = show_hidden; self } builder_methods!{ pub font_size { style.font_size = Some(FontSize) } } } impl<'a> Widget for DirectoryView<'a> { type State = State; type Style = Style; type Event = Vec<Event>; fn init_state(&self, id_gen: widget::id::Generator) -> Self::State { State { entries: Vec::new(), directory: std::path::PathBuf::new(), ids: Ids::new(id_gen), } } fn style(&self) -> Self::Style { self.style.clone() } fn update(self, args: widget::UpdateArgs<Self>) -> Self::Event { let widget::UpdateArgs { id, state, style, rect, ui, .. } = args; let DirectoryView { directory, types, .. } = self; if directory != &state.directory { state.update(|state| { state.directory = directory.to_path_buf(); state.entries.clear(); }); let show_hidden = self.show_hidden; let mut entries: Vec<_> = match std::fs::read_dir(directory).ok() { Some(entries) => { entries.filter_map(|e| e.ok()) .filter_map(|f| { let path = f.path(); if check_hidden(show_hidden, types, &path) { Some(path) } else { None } }).collect() } None => return Vec::new(), }; // Sort directories before files and alphabetically otherwise entries.sort_by(|a,b| { if a.is_dir() && !b.is_dir() { Ordering::Less } else if !a.is_dir() && b.is_dir() { Ordering::Greater } else { a.cmp(b) } }); state.update(|state| { for entry_path in entries { let entry = Entry { path: entry_path.to_path_buf(), is_selected: false, }; state.entries.push(entry); } }); } let color = style.color(&ui.theme); let font_size = style.font_size(&ui.theme); let file_h = font_size as Scalar * 2.0; let unselected_rect_color = style.unselected_color(&ui.theme) .unwrap_or_else(|| color.plain_contrast().plain_contrast()); let text_color = style.text_color(&ui.theme) .unwrap_or_else(|| color.plain_contrast()); // Color the background of the directory view. widget::Rectangle::fill(rect.dim()) .color(unselected_rect_color) .xy(rect.xy()) .parent(id) .graphics_for(id) .set(state.ids.rectangle, ui); // Collect any events that have occurred. let mut events = Vec::new(); let list_h = rect.h().min(state.entries.len() as Scalar * file_h); let (mut list_events, scrollbar) = widget::ListSelect::multiple(state.entries.len()) .flow_down() .item_size(file_h) .scrollbar_on_top() .w_h(rect.w(), list_h) .mid_top_of(id) .set(state.ids.list_select, ui); // A helper method for collecting all selected entries. let collect_selected = |entries: &[Entry]| entries.iter() .filter_map(|e| if e.is_selected { Some(e.path.clone()) } else { None }) .collect(); while let Some(event) = list_events.next(ui, |i| state.entries[i].is_selected) { use widget::list_select; match event { // Instantiate a `Button` for each item. 
list_select::Event::Item(item) => { use position::{Place, Relative}; let entry = &state.entries[item.i]; let is_selected = entry.is_selected; let is_directory = entry.path.is_dir(); // Get the file/directory name. let entry_name = state.entries[item.i].path.file_name() .and_then(|name| name.to_str()) .map_or_else(String::new, |s| { let mut string = s.to_string(); if is_directory { string.push('/'); } string }); // Determine the color of this file's `Rectangle`. let rect_color = if is_selected { color } else { match ui.widget_input(item.widget_id).mouse() { None => color::TRANSPARENT, Some(_) => unselected_rect_color, } }; let button = widget::Button::new() .border(0.0) .color(rect_color) .label(&entry_name) .label_color(text_color) .label_font_size(font_size) .label_x(Relative::Place(Place::Start(Some(font_size as Scalar)))) .left_justify_label(); item.set(button, ui); }, // Update the state's selection. list_select::Event::Selection(selection) => { match selection { list_select::Selection::Add(indices) => state.update(|state| for i in indices { state.entries[i].is_selected = true; }), list_select::Selection::Remove(indices) => state.update(|state| for i in indices { state.entries[i].is_selected = false; }), } events.push(Event::Selection(collect_selected(&state.entries))); }, // Propagate the interaction events. list_select::Event::Click(e) => events.push(Event::Click(e, collect_selected(&state.entries))), list_select::Event::DoubleClick(e) => events.push(Event::DoubleClick(e, collect_selected(&state.entries))), list_select::Event::Press(e) => events.push(Event::Press(e, collect_selected(&state.entries))), list_select::Event::Release(e) => events.push(Event::Release(e, collect_selected(&state.entries))), } } if let Some(s) = scrollbar { s.set(ui); } // If the scrollable `Rectangle` was pressed, deselect all entries. if ui.widget_input(id).presses().mouse().left().next().is_some() { // Deselect all entries. state.update(|state| for entry in &mut state.entries { entry.is_selected = false; }); events.push(Event::Selection(Vec::new())); } events } } impl<'a> Colorable for DirectoryView<'a> { builder_method!(color { style.color = Some(Color) }); }
return false
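// A minimal usage sketch for the widget above (not part of the original file;
// `canvas_id` and `view_id` are assumed application-side widget ids, and the
// extension list is illustrative only).
fn set_file_browser(
    ui: &mut conrod::UiCell,
    canvas_id: widget::Id,
    view_id: widget::Id,
    dir: &std::path::Path,
) {
    // Show png/jpg files plus directories, hiding dotfiles.
    for event in DirectoryView::new(dir, super::Types::WithExtension(&["png", "jpg"]))
        .show_hidden_files(false)
        .wh_of(canvas_id) // size to an assumed parent canvas
        .set(view_id, ui)
    {
        if let Event::DoubleClick(_, selected) = event {
            println!("open {:?}", selected); // selected: Vec<PathBuf> of selected entries
        }
    }
}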
replica_state.rs
use std::{
    cmp::min,
    collections::{BTreeMap, HashSet},
    ops::{Deref, DerefMut},
    sync::Arc,
};
use std::iter::FromIterator;
use std::fmt;

use tracing::{debug, error, warn};
use tracing::instrument;
use async_rwlock::{RwLock};

use dataplane::{record::RecordSet};
use dataplane::{Offset, Isolation, ReplicaKey};
use fluvio_controlplane_metadata::partition::{Replica};
use fluvio_controlplane::LrsRequest;
use fluvio_storage::{FileReplica, StorageError, ReplicaStorage, OffsetInfo};
use fluvio_types::{SpuId};

use crate::{
    config::{ReplicationConfig},
    control_plane::SharedStatusUpdate,
};
use crate::replication::follower::sync::{PeerFileTopicResponse, PeerFilePartitionResponse};
use crate::storage::SharableReplicaStorage;

use super::{FollowerNotifier};

pub type SharedLeaderState<S> = LeaderReplicaState<S>;
pub type SharedFileLeaderState = LeaderReplicaState<FileReplica>;

#[derive(Debug)]
pub struct LeaderReplicaState<S> {
    replica: Replica,
    in_sync_replica: u16,
    storage: SharableReplicaStorage<S>,
    config: ReplicationConfig,
    followers: Arc<RwLock<BTreeMap<SpuId, OffsetInfo>>>,
    status_update: SharedStatusUpdate,
}

impl<S> Clone for LeaderReplicaState<S> {
    fn clone(&self) -> Self {
        Self {
            replica: self.replica.clone(),
            storage: self.storage.clone(),
            config: self.config.clone(),
            followers: self.followers.clone(),
            in_sync_replica: self.in_sync_replica,
            status_update: self.status_update.clone(),
        }
    }
}

impl<S> fmt::Display for LeaderReplicaState<S>
where
    S: ReplicaStorage,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Leader state for {}", self.id())
    }
}

impl<S> Deref for LeaderReplicaState<S> {
    type Target = SharableReplicaStorage<S>;

    fn deref(&self) -> &Self::Target {
        &self.storage
    }
}

impl<S> DerefMut for LeaderReplicaState<S> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.storage
    }
}

/// convert follower ids into a `BTreeMap`, excluding the leader itself
fn ids_to_map(leader_id: SpuId, follower_ids: HashSet<SpuId>) -> BTreeMap<SpuId, OffsetInfo> {
    let mut followers = BTreeMap::new();
    for id in follower_ids.into_iter().filter(|id| *id != leader_id) {
        followers.insert(id, OffsetInfo::default());
    }
    followers
}

impl<S> LeaderReplicaState<S>
where
    S: ReplicaStorage,
{
    /// create new state from existing storage
    /// calculate default in_sync_replica from followers
    pub fn new(
        replica: Replica,
        config: ReplicationConfig,
        status_update: SharedStatusUpdate,
        inner: SharableReplicaStorage<S>,
    ) -> Self {
        debug!(?replica, "replica storage");
        let in_sync_replica = replica.replicas.len() as u16;
        let follower_ids = HashSet::from_iter(replica.replicas.clone());
        let followers = ids_to_map(replica.leader, follower_ids);
        debug!(?followers, "leader followers");
        debug!(
            in_sync_replica,
            replica = %replica.id,
            follower = ?replica.replicas,
            "creating leader"
        );
        Self {
            replica,
            storage: inner,
            config,
            followers: Arc::new(RwLock::new(followers)),
            in_sync_replica,
            status_update,
        }
    }

    /// create new complete state and spawn controller
    ) -> Result<LeaderReplicaState<S>, StorageError>
    where
        ReplicationConfig: From<&'a C>,
        S::Config: From<&'a C>,
    {
        let inner = SharableReplicaStorage::create(replica.id.clone(), config.into()).await?;
        let leader_replica = Self::new(replica, config.into(), status_update, inner);
        leader_replica.update_status().await;
        Ok(leader_replica)
    }

    /// replica id
    pub fn id(&self) -> &ReplicaKey {
        &self.replica.id
    }

    /// leader SPU. This should be same as our local SPU
    pub fn leader(&self) -> SpuId {
        self.replica.leader
    }

    /// override in sync replica
    #[allow(unused)]
    fn set_in_sync_replica(&mut self, replica_count: u16) {
        self.in_sync_replica = replica_count;
    }

    /// update the leader's state from a follower's offset state
    /// an updated follower state may result in an update of the leader's hw
    /// return true if the leader's state was updated; in that case, updates can be propagated to followers
    /// return false if there was no change in the leader's state
    #[instrument(skip(self, notifier))]
    pub async fn update_states_from_followers(
        &self,
        follower_id: SpuId,
        follower_pos: OffsetInfo,
        notifier: &FollowerNotifier,
    ) -> bool {
        let leader_pos = self.as_offset();

        // the follower must always be behind the leader
        if follower_pos.newer(&leader_pos) {
            warn!(?follower_pos, ?leader_pos, "follower pos must not be newer");
            return false;
        }

        // get follower info
        let mut followers = self.followers.write().await;
        let update = if let Some(current_follow_info) = followers.get_mut(&follower_id) {
            if current_follow_info.update(&follower_pos) {
                // if our leo and hw are the same, there is no need to recompute hw
                if !leader_pos.is_committed() {
                    if let Some(hw) = compute_hw(&leader_pos, self.in_sync_replica, &followers) {
                        debug!(hw, "updating hw");
                        if let Err(err) = self.update_hw(hw).await {
                            error!("error updating hw: {}", err);
                        }
                    } else {
                        debug!("no hw change");
                    }
                } else {
                    debug!("leader is committed");
                }
                debug!("follower changed");
                true
            } else {
                false
            }
        } else {
            error!(follower_id, "invalid follower");
            false
        };
        drop(followers);

        self.notify_followers(notifier).await;
        if update {
            self.update_status().await;
        }

        update
    }

    /// compute the updates needed by a follower, based on the leader's state
    pub async fn follower_updates(
        &self,
        follower_id: &SpuId,
        max_bytes: u32,
    ) -> Option<PeerFileTopicResponse> {
        let leader_offset = self.as_offset();

        let reader = self.followers.read().await;
        if let Some(follower_info) = reader.get(follower_id) {
            if follower_info.is_valid() && !follower_info.is_same(&leader_offset) {
                let mut topic_response = PeerFileTopicResponse {
                    name: self.id().topic.to_owned(),
                    ..Default::default()
                };
                let mut partition_response = PeerFilePartitionResponse {
                    partition: self.id().partition,
                    ..Default::default()
                };

                // if this follower's leo is less than the leader's leo, then send the diff
                if follower_info.leo < leader_offset.leo {
                    let offset = self
                        .read_records(
                            follower_info.leo,
                            max_bytes,
                            Isolation::ReadUncommitted,
                            &mut partition_response,
                        )
                        .await;
                    debug!(
                        hw = offset.hw,
                        leo = offset.leo,
                        replica = %self.id(),
                        len = partition_response.records.len(),
                        "read records"
                    );
                } else {
                    // only hw needs to be updated
                    debug!(
                        hw = leader_offset.hw,
                        leo = leader_offset.leo,
                        replica = %self.id(),
                        "sending hw only");
                }

                // ensure leo and hw are set correctly.
                // storage might have updated the last stable offset
                partition_response.leo = leader_offset.leo;
                partition_response.hw = leader_offset.hw;

                topic_response.partitions.push(partition_response);
                Some(topic_response)
            } else {
                None
            }
        } else {
            None
        }
    }

    /// convert this state into an LRS request
    async fn as_lrs_request(&self) -> LrsRequest {
        let leader = (self.leader(), self.hw(), self.leo()).into();
        let replicas = self
            .followers
            .read()
            .await
            .iter()
            .map(|(follower_id, follower_info)| {
                (*follower_id, follower_info.hw, follower_info.leo).into()
            })
            .collect();

        LrsRequest::new(self.id().to_owned(), leader, replicas)
    }

    #[instrument(skip(self))]
    pub async fn update_status(&self) {
        let lrs = self.as_lrs_request().await;
        debug!(hw = lrs.leader.hw, leo = lrs.leader.leo);
        self.status_update.send(lrs).await
    }

    /// write records to storage,
    /// then notify followers so they can sync the new records
    #[instrument(skip(self, records, notifiers))]
    pub async fn write_record_set(
        &self,
        records: &mut RecordSet,
        notifiers: &FollowerNotifier,
    ) -> Result<(), StorageError> {
        self.storage
            .write_record_set(records, self.in_sync_replica == 1)
            .await?;

        self.notify_followers(notifiers).await;
        self.update_status().await;

        Ok(())
    }

    async fn notify_followers(&self, notifier: &FollowerNotifier) {
        let leader_offset = self.as_offset();
        let followers = self.followers.read().await;
        debug!(?leader_offset);
        for follower in &self.replica.replicas {
            if let Some(follower_info) = followers.get(&follower) {
                debug!(follower, ?follower_info);
                if follower_info.is_valid() && !follower_info.is_same(&leader_offset) {
                    debug!(follower, "notify");
                    notifier.notify_follower(follower, self.id().clone()).await;
                } else {
                    debug!(follower, "no update");
                }
            }
        }
    }

    #[allow(dead_code)]
    pub async fn live_replicas(&self) -> Vec<SpuId> {
        self.followers.read().await.keys().cloned().collect()
    }

    // get a copy of followers_info for debugging
    #[allow(unused)]
    pub async fn followers_info(&self) -> BTreeMap<SpuId, OffsetInfo> {
        self.followers.read().await.clone()
    }
}

/// compute the leader's updated hw based on follower offsets
/// this is done after a follower's leo is updated
/// min_replica must be at least 1, and (min_replica - 1) must be at most followers.len()
/// update hw based on offset change
///
/// // case 1: follower offset has same value as leader
/// // leader: leo: 2, hw: 2, follower: leo: 2, hw: 2
/// // Input: leo: 2, hw: 2; this happens during follower resync.
/// // Expect: no changes,
///
/// // case 2: follower offset is same as previous
/// // leader: leo: 2, hw: 2, follower: leo: 1, hw: 1
/// // Input: leo: 1, hw: 1,
/// // Expect: no status change, but follower sync
/// //
/// // case 3: different follower offset
/// // leader: leo: 3, hw: 3, follower: leo: 1, hw: 1
/// // Input: leo: 2, hw: 2,
/// // Expect: status change, follower sync
///
/// Simple HW mark calculation (assume LRS = 2): find the minimum offset value that satisfies the LRS.
/// Assume: leader leo = 10, hw = 2,
///   follower: leo(2,4)   => no change, since it doesn't satisfy the minimum LRS
///   follower: leo(3,4)   => hw = 3, the smallest leo that satisfies the LRS
///   follower: leo(4,4)   => hw = 4
///   follower: leo(6,7,9) => hw = 7
fn compute_hw(
    leader: &OffsetInfo,
    min_replica: u16,
    followers: &BTreeMap<SpuId, OffsetInfo>,
) -> Option<Offset> {
    // assert!(min_replica > 0);
    // assert!((min_replica - 1) <= followers.len() as u16);
    let min_lrs = min(min_replica - 1, followers.len() as u16);

    // compute the offsets that are greater than the leader's HW
    let qualified_leos: Vec<Offset> = followers
        .values()
        .filter_map(|follower_info| {
            let leo = follower_info.leo;
            if leo > leader.hw {
                Some(leo)
            } else {
                None
            }
        })
        .collect();

    if qualified_leos.is_empty() {
        return None;
    }

    //println!("qualified: {:#?}", qualified_leos);

    let mut unique_leos = qualified_leos.clone();
    unique_leos.sort_unstable(); // dedup only removes consecutive duplicates, so sort first
    unique_leos.dedup();

    // debug!("unique_leos: {:#?}", unique_leos);

    let mut hw_list: Vec<Offset> = unique_leos
        .iter()
        .filter_map(|unique_offset| {
            // a leo qualifies only if it has been replicated to at least min_lrs followers
            if (qualified_leos
                .iter()
                .filter(|leo| unique_offset <= leo)
                .count() as u16)
                >= min_lrs
            {
                Some(*unique_offset)
            } else {
                None
            }
        })
        .collect();

    hw_list.sort_unstable();
    hw_list.pop()
}

impl<S> LeaderReplicaState<S> where S: ReplicaStorage {}

impl LeaderReplicaState<FileReplica> {}

#[cfg(test)]
mod test_hw_updates {

    use super::*;

    fn offsets_maps(offsets: Vec<(SpuId, OffsetInfo)>) -> BTreeMap<SpuId, OffsetInfo> {
        offsets.into_iter().collect()
    }

    /// test min lrs check
    /*
    #[test]
    #[should_panic]
    fn test_hw_min_lrs_invalid_hw() {
        compute_hw(&OffsetInfo { hw: 0, leo: 10 }, 0, &offsets_maps(vec![]));
    }
    */

    /*
    TODO: Revisit check of min lrs

    #[test]
    #[should_panic]
    fn test_hw_min_lrs_too_much() {
        compute_hw(
            &OffsetInfo { hw: 0, leo: 10 },
            3,
            &offsets_maps(vec![(5001, OffsetInfo::default())]),
        );
    }
    */

    // test hw calculation for 2 spu and 2 in-sync replicas
    #[test]
    fn test_hw22() {
        // starts with leader leo = 10, hw = 0
        // initially, we don't have any information about the follower,
        // so our hw doesn't need to be updated
        assert_eq!(
            compute_hw(
                &OffsetInfo { hw: 0, leo: 10 },
                2,
                &offsets_maps(vec![(5001, OffsetInfo::default())])
            ),
            None
        );

        // follower sends back leo = 4, hw = 0
        // this causes hw = 4, since this is the min that is replicated across 2 SPUs
        assert_eq!(
            compute_hw(
                &OffsetInfo { hw: 0, leo: 10 },
                2,
                &offsets_maps(vec![(5001, OffsetInfo { leo: 4, hw: 0 })])
            ),
            Some(4)
        );

        // we send back follower hw = 4
        // follower sends back leo = 6, hw = 4
        // this should update hw = 6
        assert_eq!(
            compute_hw(
                &OffsetInfo { hw: 4, leo: 10 },
                2,
                &offsets_maps(vec![(5001, OffsetInfo { leo: 6, hw: 4 })])
            ),
            Some(6)
        );

        // follower sends back the same info; since min LEO didn't change, no hw update
        assert_eq!(
            compute_hw(
                &OffsetInfo { hw: 6, leo: 10 },
                2,
                &offsets_maps(vec![(5001, OffsetInfo { leo: 6, hw: 6 })])
            ),
            None
        );

        // follower is now fully caught up, leo = 10, hw = 6
        // hw should now be 10
        assert_eq!(
            compute_hw(
                &OffsetInfo { hw: 6, leo: 10 },
                2,
                &offsets_maps(vec![(5001, OffsetInfo
{ leo: 10, hw: 6 })])
            ),
            Some(10)
        );

        // follower sends back the same; no hw update
        assert_eq!(
            compute_hw(
                &OffsetInfo { hw: 10, leo: 10 },
                2,
                &offsets_maps(vec![(5001, OffsetInfo { leo: 10, hw: 10 })])
            ),
            None
        );
    }

    // test hw calculation for 3 spu with 2 in-sync replicas
    #[test]
    fn test_hw32() {
        assert_eq!(
            compute_hw(
                &OffsetInfo { hw: 0, leo: 10 },
                2,
                &offsets_maps(vec![
                    (5001, OffsetInfo::default()),
                    (5002, OffsetInfo::default())
                ])
            ),
            None
        );

        // hw is updated when at least 1 SPU has replicated offsets
        assert_eq!(
            compute_hw(
                &OffsetInfo { leo: 10, hw: 0 },
                2,
                &offsets_maps(vec![
                    (5001, OffsetInfo { leo: 4, hw: 0 }),
                    (5002, OffsetInfo::default())
                ])
            ),
            Some(4)
        );

        // we take the maximum leo since min lrs = 2
        assert_eq!(
            compute_hw(
                &OffsetInfo { leo: 10, hw: 0 },
                2,
                &offsets_maps(vec![
                    (5001, OffsetInfo { leo: 4, hw: 0 }),
                    (5002, OffsetInfo { leo: 6, hw: 0 })
                ])
            ),
            Some(6)
        );

        // test with 3 followers
        assert_eq!(
            compute_hw(
                &OffsetInfo { leo: 10, hw: 0 },
                2,
                &offsets_maps(vec![
                    (5001, OffsetInfo { leo: 4, hw: 0 }),
                    (5002, OffsetInfo { leo: 6, hw: 0 }),
                    (5003, OffsetInfo { leo: 9, hw: 0 })
                ])
            ),
            Some(9)
        );

        // none of the followers has caught up
        assert_eq!(
            compute_hw(
                &OffsetInfo { leo: 10, hw: 7 },
                2,
                &offsets_maps(vec![
                    (5001, OffsetInfo { leo: 4, hw: 0 }),
                    (5002, OffsetInfo { leo: 6, hw: 0 })
                ])
            ),
            None
        );
    }

    // test hw calculation for 3 spu and 3 in-sync replicas
    #[test]
    fn test_hw33() {
        assert_eq!(
            compute_hw(
                &OffsetInfo { hw: 0, leo: 10 },
                3,
                &offsets_maps(vec![
                    (5001, OffsetInfo::default()),
                    (5002, OffsetInfo::default()),
                ])
            ),
            None
        );

        // need at least 2 replicas
        assert_eq!(
            compute_hw(
                &OffsetInfo { leo: 10, hw: 0 },
                3,
                &offsets_maps(vec![
                    (5001, OffsetInfo { leo: 4, hw: 0 }),
                    (5002, OffsetInfo::default())
                ])
            ),
            None
        );

        // 4 is the min offset
        assert_eq!(
            compute_hw(
                &OffsetInfo { leo: 10, hw: 0 },
                3,
                &offsets_maps(vec![
                    (5001, OffsetInfo { leo: 4, hw: 0 }),
                    (5002, OffsetInfo { leo: 7, hw: 0 }),
                ])
            ),
            Some(4)
        );

        // no hw update since no offset has been replicated by 2 followers
        assert_eq!(
            compute_hw(
                &OffsetInfo { leo: 10, hw: 7 },
                3,
                &offsets_maps(vec![
                    (5001, OffsetInfo { leo: 7, hw: 6 }),
                    (5002, OffsetInfo { leo: 8, hw: 6 }),
                ])
            ),
            None
        );

        // leader can progress to 8
        assert_eq!(
            compute_hw(
                &OffsetInfo { leo: 10, hw: 7 },
                3,
                &offsets_maps(vec![
                    (5001, OffsetInfo { leo: 9, hw: 6 }),
                    (5002, OffsetInfo { leo: 8, hw: 6 }),
                ])
            ),
            Some(8)
        );
    }
}

#[cfg(test)]
mod test_leader {

    use async_trait::async_trait;

    use fluvio_future::test_async;
    use fluvio_controlplane_metadata::partition::{ReplicaKey, Replica};
    use fluvio_storage::{ReplicaStorage, ReplicaStorageConfig, OffsetInfo};
    use dataplane::Offset;
    use dataplane::fixture::{create_recordset};

    use crate::{
        config::{SpuConfig},
    };
    use crate::control_plane::StatusMessageSink;

    use super::*;

    const MAX_BYTES: u32 = 1000;

    #[derive(Default)]
    struct MockConfig {}

    impl ReplicaStorageConfig for MockConfig {}

    #[derive(Default)]
    struct MockStorage {
        pos: OffsetInfo,
    }

    impl From<&SpuConfig> for MockConfig {
        fn from(_log: &SpuConfig) -> MockConfig {
            MockConfig::default()
        }
    }

    #[async_trait]
    impl ReplicaStorage for MockStorage {
        async fn create(
            _replica: &dataplane::ReplicaKey,
            _config: Self::Config,
        ) -> Result<Self, fluvio_storage::StorageError> {
            Ok(MockStorage {
                pos: OffsetInfo { leo: 0, hw: 0 },
            })
        }

        fn get_hw(&self) -> Offset {
            self.pos.hw
        }

        fn get_leo(&self) -> Offset {
            self.pos.leo
        }

        async fn read_partition_slice<P>(
            &self,
            offset: Offset,
            _max_len: u32,
            _isolation: dataplane::Isolation,
            _partition_response: &mut P,
        ) -> OffsetInfo
        where
            P: fluvio_storage::SlicePartitionResponse +
                Send,
        {
            OffsetInfo { leo: offset, hw: 0 }
        }

        // dummy implementation of write
        async fn write_recordset(
            &mut self,
            records: &mut dataplane::record::RecordSet,
            update_highwatermark: bool,
        ) -> Result<(), fluvio_storage::StorageError> {
            self.pos.leo = records.last_offset().unwrap();
            if update_highwatermark {
                self.pos.hw = self.pos.leo;
            }
            Ok(())
        }

        async fn update_high_watermark(
            &mut self,
            offset: Offset,
        ) -> Result<bool, fluvio_storage::StorageError> {
            self.pos.hw = offset;
            Ok(true)
        }

        type Config = MockConfig;

        fn get_log_start_offset(&self) -> Offset {
            todo!()
        }

        async fn remove(&self) -> Result<(), fluvio_storage::StorageError> {
            todo!()
        }
    }

    #[test_async]
    async fn test_leader_in_sync_replica() -> Result<(), ()> {
        let leader_config = SpuConfig {
            id: 5000,
            ..Default::default()
        };

        let replica: ReplicaKey = ("test", 1).into();
        // inserting a new replica state; this should set the follower offsets to -1,-1 as the initial state
        let state: LeaderReplicaState<MockStorage> = LeaderReplicaState::create(
            Replica::new(replica, 5000, vec![5000]),
            &leader_config,
            StatusMessageSink::shared(),
        )
        .await
        .expect("state");

        assert_eq!(state.in_sync_replica, 1);

        Ok(())
    }

    #[test_async]
    async fn test_follower_update() -> Result<(), ()> {
        let leader_config = SpuConfig {
            id: 5000,
            ..Default::default()
        };
        let notifier = FollowerNotifier::shared();

        let replica: ReplicaKey = ("test", 1).into();
        // inserting a new replica state; this should set the follower offsets to -1,-1 as the initial state
        let state: LeaderReplicaState<MockStorage> = LeaderReplicaState::create(
            Replica::new(replica, 5000, vec![5001, 5002]),
            &leader_config,
            StatusMessageSink::shared(),
        )
        .await
        .expect("state");

        // write a fake recordset to ensure leo = 10
        state
            .write_record_set(&mut create_recordset(10), &notifier)
            .await
            .expect("write");
        state.update_hw(2).await.expect("hw");

        assert_eq!(state.leo(), 10);
        assert_eq!(state.hw(), 2);

        let follower_info = state.followers.read().await;
        assert!(!follower_info.get(&5001).unwrap().is_valid()); // follower should be in an invalid state
        drop(follower_info);

        assert!(state.follower_updates(&5003, MAX_BYTES).await.is_none()); // we don't have 5003
        assert!(state.follower_updates(&5001, MAX_BYTES).await.is_none()); // 5001 is still invalid
        assert!(state.follower_updates(&5002, MAX_BYTES).await.is_none()); // 5002 is still invalid

        // got an update from 5001, which has just been initialized
        let mut followers = state.followers.write().await;
        followers
            .get_mut(&5001)
            .expect("map")
            .update(&OffsetInfo { leo: 0, hw: 0 });
        drop(followers);

        assert!(state.follower_updates(&5002, MAX_BYTES).await.is_none()); // 5002 is still invalid
        let updates = state
            .follower_updates(&5001, MAX_BYTES)
            .await
            .expect("some");
        assert_eq!(updates.name, "test");
        assert_eq!(updates.partitions[0].leo, 10);
        assert_eq!(updates.partitions[0].hw, 2);

        // updated from 5002
        let mut followers = state.followers.write().await;
        followers
            .get_mut(&5002)
            .expect("map")
            .update(&OffsetInfo { leo: 0, hw: 0 });
        drop(followers);

        let updates = state
            .follower_updates(&5002, MAX_BYTES)
            .await
            .expect("some");
        assert_eq!(updates.name, "test");
        assert_eq!(updates.partitions[0].leo, 10);
        assert_eq!(updates.partitions[0].hw, 2);

        // 5002 has fully caught up
        let mut followers = state.followers.write().await;
        followers
            .get_mut(&5002)
            .expect("map")
            .update(&OffsetInfo { leo: 10, hw: 2 });
        drop(followers);

        assert!(state.follower_updates(&5002, MAX_BYTES).await.is_none()); // 5002 is fully caught up, nothing to send
        assert!(state.follower_updates(&5001, MAX_BYTES).await.is_some()); // 5001 still needs to be synced
        Ok(())
    }

    #[test_async]
    async fn test_update_leader_from_followers() -> Result<(), ()> {
        use crate::core::{GlobalContext};
        use fluvio_controlplane_metadata::spu::{SpuSpec};

        let leader_config = SpuConfig {
            id: 5000,
            ..Default::default()
        };
        let specs = vec![
            SpuSpec::new_private_addr(5000, 9000, "localhost".to_owned()),
            SpuSpec::new_private_addr(5001, 9001, "localhost".to_owned()),
            SpuSpec::new_private_addr(5002, 9002, "localhost".to_owned()),
        ];
        let gctx: Arc<GlobalContext<MockStorage>> =
            GlobalContext::new_shared_context(leader_config);
        gctx.spu_localstore().sync_all(specs);
        gctx.sync_follower_update().await;

        let notifier = gctx.follower_notifier();
        assert!(notifier.get(&5001).await.is_some());
        assert!(notifier.get(&5002).await.is_some());
        assert!(notifier.get(&5000).await.is_none());

        let replica: ReplicaKey = ("test", 1).into();
        // inserting a new replica state; this should set the follower offsets to -1,-1 as the initial state
        let leader: LeaderReplicaState<MockStorage> = LeaderReplicaState::create(
            Replica::new(replica.clone(), 5000, vec![5000, 5001, 5002]),
            gctx.config(),
            StatusMessageSink::shared(),
        )
        .await
        .expect("state");

        // followers' offsets should be initialized
        let follower_info = leader.followers_info().await;
        assert_eq!(follower_info.get(&5001).unwrap().leo, -1);
        assert_eq!(follower_info.get(&5001).unwrap().hw, -1);

        let f1 = notifier.get(&5001).await.expect("5001");
        let f2 = notifier.get(&5002).await.expect("5002");

        // write a fake recordset to ensure leo = 10
        leader
            .write_record_set(&mut create_recordset(10), &notifier)
            .await
            .expect("write");

        // check leader leo = 10 and hw = 0
        assert_eq!(leader.leo(), 10);
        assert_eq!(leader.hw(), 0);

        assert!(f1.drain_replicas().await.is_empty());
        assert!(f2.drain_replicas().await.is_empty());

        // handle an invalid offset update from a follower
        assert!(
            !leader
                .update_states_from_followers(5001, OffsetInfo { leo: 5, hw: 20 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 0);
        assert!(f1.drain_replicas().await.is_empty());

        // update from an unknown follower
        assert!(
            !leader
                .update_states_from_followers(5004, OffsetInfo { leo: 6, hw: 11 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 0);

        // handle a newer leo
        assert!(
            !leader
                .update_states_from_followers(5001, OffsetInfo { leo: 20, hw: 0 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 0);
        assert!(!f1.has_replica(&replica).await); // no update to follower required

        debug!(offsets = ?leader.followers_info().await, "updating 5001 with leo=0,hw=0");
        assert!(
            leader
                .update_states_from_followers(5001, OffsetInfo { leo: 0, hw: 0 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 0); // no change in hw, since we just initialized the follower's state
        assert!(f1.drain_replicas().await.contains(&replica));
        // debug!(f2 = ?f2.drain_replicas().await);
        assert!(f2.drain_replicas().await.is_empty()); // f2 is still invalid

        // 5001 partial update; the follower still needs to sync up with the leader
        debug!(offsets = ?leader.followers_info().await, "updating 5001 with leo=6,hw=0");
        assert!(
            leader
                .update_states_from_followers(5001, OffsetInfo { leo: 6, hw: 0 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 0);
        assert!(f1.drain_replicas().await.contains(&replica));

        // 5001 has fully caught up with the leader; nothing to send to followers until 5002 updates
        debug!(offsets = ?leader.followers_info().await, "updating 5001 with leo=10,hw=0");
        assert!(
            leader
                .update_states_from_followers(5001, OffsetInfo { leo: 10, hw: 0 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 0);
        assert!(f1.drain_replicas().await.is_empty());
        assert!(f2.drain_replicas().await.is_empty());
        let follower_info = leader.followers_info().await;
        assert_eq!(follower_info.get(&5001).unwrap().leo, 10);
        assert_eq!(follower_info.get(&5001).unwrap().hw, 0);

        // init 5002
        debug!(offsets = ?leader.followers_info().await, "updating 5002 with leo=0,hw=0");
        assert!(
            leader
                .update_states_from_followers(5002, OffsetInfo { leo: 0, hw: 0 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 0);
        assert!(f2.drain_replicas().await.contains(&replica));

        // partial update of 5002; this leads hw to 6, and both followers will be updated
        debug!(offsets = ?leader.followers_info().await, "updating 5002 with leo=6,hw=0");
        assert!(
            leader
                .update_states_from_followers(5002, OffsetInfo { leo: 6, hw: 0 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 6);
        assert!(f1.drain_replicas().await.contains(&replica));
        assert!(f2.drain_replicas().await.contains(&replica));

        // 5002 full update; both followers will be updated
        debug!(offsets = ?leader.followers_info().await, "updating 5002 with leo=10,hw=0");
        assert!(
            leader
                .update_states_from_followers(5002, OffsetInfo { leo: 10, hw: 0 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 10);
        assert!(f1.drain_replicas().await.contains(&replica));
        assert!(f2.drain_replicas().await.contains(&replica));

        // 5002 sends the same update; 5001 still needs an update
        debug!(offsets = ?leader.followers_info().await, "updating 5002 with leo=10,hw=10");
        assert!(
            leader
                .update_states_from_followers(5002, OffsetInfo { leo: 10, hw: 10 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 10);
        assert!(f1.drain_replicas().await.contains(&replica));
        assert!(f2.drain_replicas().await.is_empty());

        // 5001 is now the same as both the leader and 5002
        debug!(offsets = ?leader.followers_info().await, "updating 5001 with leo=10,hw=10");
        assert!(
            leader
                .update_states_from_followers(5001, OffsetInfo { leo: 10, hw: 10 }, &notifier)
                .await
        );
        assert_eq!(leader.hw(), 10);
        assert!(f1.drain_replicas().await.is_empty());
        assert!(f2.drain_replicas().await.is_empty());

        Ok(())
    }
}
pub async fn create<'a, C>( replica: Replica, config: &'a C, status_update: SharedStatusUpdate,
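// A tiny standalone illustration of the hw rule implemented by `compute_hw`
// above (sketch only; the follower SPU id 5001 is made up): with 2 in-sync
// replicas, hw advances to the highest leo replicated by at least one follower.
fn compute_hw_demo() {
    use std::collections::BTreeMap;

    let leader = OffsetInfo { leo: 10, hw: 2 };
    let mut followers = BTreeMap::new();
    followers.insert(5001, OffsetInfo { leo: 6, hw: 2 });

    // follower leo 6 is above the leader's hw 2, so hw can move up to 6
    assert_eq!(compute_hw(&leader, 2, &followers), Some(6));
}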
game_builder.rs
//! # GameBuilder Helper
//! Utility for creating complex games with non-standard komi, handicap, etc.
//! # Example
//! ```
//! use goban::rules::game_builder::GameBuilder;
//! use goban::rules::Rule;
//! use goban::rules::game::Game;
//!
//! let mut builder = GameBuilder::default();
//! // or
//! let mut builder = Game::builder();
//! let game = builder
//!     .rule(Rule::Japanese)
//!     .size((19,19))
//!     .handicap(&[(3,3), (4,4)])
//!     .komi(10.)
//!     .build();
//! ```

use crate::pieces::goban::Goban;
use crate::pieces::Nat;
use crate::pieces::stones::Color;
use crate::pieces::util::coord::Point;
use crate::rules::{EndGame, Move, Player, Rule};
use crate::rules::game::Game;
use crate::rules::Rule::Chinese;

pub struct GameBuilder {
    size: (u32, u32),
    komi: f32,
    manual_komi: bool,
    black_player: String,
    white_player: String,
    rule: Rule,
    handicap_points: Vec<Point>,
    turn: Option<Player>,
    moves: Vec<Move>,
    outcome: Option<EndGame>,
}

impl GameBuilder {
    fn new() -> GameBuilder {
        GameBuilder {
            size: (19, 19),
            komi: Chinese.komi(),
            manual_komi: false,
            black_player: "".to_string(),
            white_player: "".to_string(),
            handicap_points: vec![],
            rule: Chinese,
            turn: None,
            moves: vec![],
            outcome: None,
        }
    }

    pub fn moves(&mut self, moves: &[Move]) -> &mut Self {
        self.moves = moves.to_vec();
        self
    }

    pub fn outcome(&mut self, outcome: EndGame) -> &mut Self {
        self.outcome = Some(outcome);
        self
    }

    /// Overrides the turn, because a game with handicap starts with White to move.
    pub fn handicap(&mut self, points: &[Point]) -> &mut Self {
        self.handicap_points = points.to_vec();
        self
    }

    pub fn size(&mut self, size: (u32, u32)) -> &mut Self {
        self.size = size;
        self
    }

    pub fn komi(&mut self, komi: f32) -> &mut Self {
        self.komi = komi;
        self.manual_komi = true;
        self
    }

    pub fn turn(&mut self, turn: Player) -> &mut Self {
        self.turn = Some(turn);
        self
    }

    pub fn
(&mut self, black_player_name: &str) -> &mut Self {
        self.black_player = black_player_name.to_string();
        self
    }

    pub fn rule(&mut self, rule: Rule) -> &mut Self {
        self.rule = rule;
        if !self.manual_komi {
            self.komi = rule.komi();
        }
        self
    }

    pub fn white_player(&mut self, white_player_name: &str) -> &mut Self {
        self.white_player = white_player_name.to_string();
        self
    }

    pub fn build(&mut self) -> Result<Game, String> {
        let mut goban: Goban = Goban::new((self.size.0 as Nat, self.size.1 as Nat));
        goban.push_many(&self.handicap_points, Color::Black);

        // Only pick a default turn if none was set explicitly:
        // Black begins, unless handicap stones were placed, in which case White begins.
        if self.turn.is_none() {
            self.turn = Some(if self.handicap_points.is_empty() {
                Player::Black
            } else {
                Player::White
            })
        }

        let mut g = Game {
            goban,
            passes: 0,
            prisoners: (0, 0),
            outcome: self.outcome,
            turn: self.turn.unwrap_or(Player::Black),
            komi: self.komi,
            rule: self.rule,
            handicap: self.handicap_points.len() as u8,
            #[cfg(feature = "history")]
            history: vec![],
            #[cfg(feature = "history")]
            moves_history: vec![],
            hashes: Default::default(),
            last_hash: 0,
            ko_point: None,
        };

        for &m in &self.moves {
            g.play(m);
        }

        Ok(g)
    }
}

impl Default for GameBuilder {
    fn default() -> Self {
        Self::new()
    }
}

impl Game {
    pub fn builder() -> GameBuilder {
        GameBuilder::default()
    }
}
black_player
ErrorPage.js
import {Button} from "@material-ui/core"; import React from "react"; export const errorPage = (message = "No Internet Connection", onclick, button = <Button onClick={onclick}>Retry</Button>) => ( <div className={"errorPage text-center"}
<br/> <div className={"text-truncate"}>{message}</div> {button} </div> );
style={{position: "absolute", top: "50%", left: "50%", transform: "translate(-50%, -50%)"}}> <img src={"./assets/icons/darkmode_nothingfound.svg"} style={{width: "8rem", height: "auto"}} alt={"Kabeers Music Logo"}/>
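// A possible call site for `errorPage` (sketch only; `retryLoad` is a
// hypothetical handler, and only the `errorPage` signature above comes from this file).
export const OfflineScreen = ({retryLoad}) => errorPage("No Internet Connection", retryLoad);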
update_health_monitor_option.py
# coding: utf-8 import pprint import re import six class UpdateHealthMonitorOption: """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ sensitive_list = [] openapi_types = { 'admin_state_up': 'bool', 'delay': 'int', 'domain_name': 'str', 'expected_codes': 'str', 'http_method': 'str', 'max_retries': 'int', 'max_retries_down': 'int', 'monitor_port': 'int', 'name': 'str', 'timeout': 'int', 'url_path': 'str', 'type': 'str' } attribute_map = { 'admin_state_up': 'admin_state_up', 'delay': 'delay', 'domain_name': 'domain_name', 'expected_codes': 'expected_codes', 'http_method': 'http_method', 'max_retries': 'max_retries', 'max_retries_down': 'max_retries_down', 'monitor_port': 'monitor_port', 'name': 'name', 'timeout': 'timeout', 'url_path': 'url_path', 'type': 'type' } def
(self, admin_state_up=None, delay=None, domain_name=None, expected_codes=None, http_method=None, max_retries=None, max_retries_down=None, monitor_port=None, name=None, timeout=None, url_path=None, type=None):
        """UpdateHealthMonitorOption - a model defined in huaweicloud sdk"""

        self._admin_state_up = None
        self._delay = None
        self._domain_name = None
        self._expected_codes = None
        self._http_method = None
        self._max_retries = None
        self._max_retries_down = None
        self._monitor_port = None
        self._name = None
        self._timeout = None
        self._url_path = None
        self._type = None
        self.discriminator = None

        if admin_state_up is not None:
            self.admin_state_up = admin_state_up
        if delay is not None:
            self.delay = delay
        if domain_name is not None:
            self.domain_name = domain_name
        if expected_codes is not None:
            self.expected_codes = expected_codes
        if http_method is not None:
            self.http_method = http_method
        if max_retries is not None:
            self.max_retries = max_retries
        if max_retries_down is not None:
            self.max_retries_down = max_retries_down
        if monitor_port is not None:
            self.monitor_port = monitor_port
        if name is not None:
            self.name = name
        if timeout is not None:
            self.timeout = timeout
        if url_path is not None:
            self.url_path = url_path
        if type is not None:
            self.type = type

    @property
    def admin_state_up(self):
        """Gets the admin_state_up of this UpdateHealthMonitorOption.

        Function: administrative state, true/false. Usage: defaults to true; true enables the health check, false disables it.

        :return: The admin_state_up of this UpdateHealthMonitorOption.
        :rtype: bool
        """
        return self._admin_state_up

    @admin_state_up.setter
    def admin_state_up(self, admin_state_up):
        """Sets the admin_state_up of this UpdateHealthMonitorOption.

        Function: administrative state, true/false. Usage: defaults to true; true enables the health check, false disables it.

        :param admin_state_up: The admin_state_up of this UpdateHealthMonitorOption.
        :type: bool
        """
        self._admin_state_up = admin_state_up

    @property
    def delay(self):
        """Gets the delay of this UpdateHealthMonitorOption.

        Interval between health checks.

        :return: The delay of this UpdateHealthMonitorOption.
        :rtype: int
        """
        return self._delay

    @delay.setter
    def delay(self, delay):
        """Sets the delay of this UpdateHealthMonitorOption.

        Interval between health checks.

        :param delay: The delay of this UpdateHealthMonitorOption.
        :type: int
        """
        self._delay = delay

    @property
    def domain_name(self):
        """Gets the domain_name of this UpdateHealthMonitorOption.

        Function: domain name of the HTTP request sent to test a member's health. Takes effect only when type is HTTP. Usage: empty by default, meaning the load balancer's VIP is used as the destination address of the HTTP request. Must start with a digit or letter and may contain only digits, letters, '-' and '.'.

        :return: The domain_name of this UpdateHealthMonitorOption.
        :rtype: str
        """
        return self._domain_name

    @domain_name.setter
    def domain_name(self, domain_name):
        """Sets the domain_name of this UpdateHealthMonitorOption.

        Function: domain name of the HTTP request sent to test a member's health. Takes effect only when type is HTTP. Usage: empty by default, meaning the load balancer's VIP is used as the destination address of the HTTP request. Must start with a digit or letter and may contain only digits, letters, '-' and '.'.

        :param domain_name: The domain_name of this UpdateHealthMonitorOption.
        :type: str
        """
        self._domain_name = domain_name

    @property
    def expected_codes(self):
        """Gets the expected_codes of this UpdateHealthMonitorOption.

        Expected HTTP response status codes; specify one of the following: a single value, e.g. 200; a list, e.g. 200,202; or a range, e.g. 200-204. Takes effect only when type is HTTP. This field is reserved and not yet in use.

        :return: The expected_codes of this UpdateHealthMonitorOption.
        :rtype: str
        """
        return self._expected_codes

    @expected_codes.setter
    def expected_codes(self, expected_codes):
        """Sets the expected_codes of this UpdateHealthMonitorOption.

        Expected HTTP response status codes; specify one of the following: a single value, e.g. 200; a list, e.g. 200,202; or a range, e.g. 200-204. Takes effect only when type is HTTP. This field is reserved and not yet in use.

        :param expected_codes: The expected_codes of this UpdateHealthMonitorOption.
        :type: str
        """
        self._expected_codes = expected_codes

    @property
    def http_method(self):
        """Gets the http_method of this UpdateHealthMonitorOption.

        HTTP method; one of GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH. Takes effect only when type is HTTP. This field is reserved and not yet in use.

        :return: The http_method of this UpdateHealthMonitorOption.
        :rtype: str
        """
        return self._http_method

    @http_method.setter
    def http_method(self, http_method):
        """Sets the http_method of this UpdateHealthMonitorOption.

        HTTP method; one of GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH. Takes effect only when type is HTTP. This field is reserved and not yet in use.

        :param http_method: The http_method of this UpdateHealthMonitorOption.
        :type: str
        """
        self._http_method = http_method

    @property
    def max_retries(self):
        """Gets the max_retries of this UpdateHealthMonitorOption.

        Maximum number of retries.

        :return: The max_retries of this UpdateHealthMonitorOption.
        :rtype: int
        """
        return self._max_retries

    @max_retries.setter
    def max_retries(self, max_retries):
        """Sets the max_retries of this UpdateHealthMonitorOption.

        Maximum number of retries.

        :param max_retries: The max_retries of this UpdateHealthMonitorOption.
        :type: int
        """
        self._max_retries = max_retries

    @property
    def max_retries_down(self):
        """Gets the max_retries_down of this UpdateHealthMonitorOption.

        Number of consecutive health check failures after which the backend server's health check status is changed from ONLINE to OFFLINE.

        :return: The max_retries_down of this UpdateHealthMonitorOption.
        :rtype: int
        """
        return self._max_retries_down

    @max_retries_down.setter
    def max_retries_down(self, max_retries_down):
        """Sets the max_retries_down of this UpdateHealthMonitorOption.

        Number of consecutive health check failures after which the backend server's health check status is changed from ONLINE to OFFLINE.

        :param max_retries_down: The max_retries_down of this UpdateHealthMonitorOption.
        :type: int
        """
        self._max_retries_down = max_retries_down

    @property
    def monitor_port(self):
        """Gets the monitor_port of this UpdateHealthMonitorOption.

        Port used for the health check. Empty by default, meaning the port of the backend server group is used.

        :return: The monitor_port of this UpdateHealthMonitorOption.
        :rtype: int
        """
        return self._monitor_port

    @monitor_port.setter
    def monitor_port(self, monitor_port):
        """Sets the monitor_port of this UpdateHealthMonitorOption.

        Port used for the health check. Empty by default, meaning the port of the backend server group is used.

        :param monitor_port: The monitor_port of this UpdateHealthMonitorOption.
        :type: int
        """
        self._monitor_port = monitor_port

    @property
    def name(self):
        """Gets the name of this UpdateHealthMonitorOption.

        Name of the health check.

        :return: The name of this UpdateHealthMonitorOption.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this UpdateHealthMonitorOption.

        Name of the health check.

        :param name: The name of this UpdateHealthMonitorOption.
        :type: str
        """
        self._name = name

    @property
    def timeout(self):
        """Gets the timeout of this UpdateHealthMonitorOption.

        Timeout of the health check. This value is recommended to be smaller than delay.

        :return: The timeout of this UpdateHealthMonitorOption.
        :rtype: int
        """
        return self._timeout

    @timeout.setter
    def timeout(self, timeout):
        """Sets the timeout of this UpdateHealthMonitorOption.

        Timeout of the health check. This value is recommended to be smaller than delay.

        :param timeout: The timeout of this UpdateHealthMonitorOption.
        :type: int
        """
        self._timeout = timeout

    @property
    def url_path(self):
        """Gets the url_path of this UpdateHealthMonitorOption.

        Function: path of the HTTP request sent to test a member's health. Defaults to "/". Usage: must start with "/". Takes effect only when type is HTTP.

        :return: The url_path of this UpdateHealthMonitorOption.
        :rtype: str
        """
        return self._url_path

    @url_path.setter
    def url_path(self, url_path):
        """Sets the url_path of this UpdateHealthMonitorOption.

        Function: path of the HTTP request sent to test a member's health. Defaults to "/". Usage: must start with "/". Takes effect only when type is HTTP.

        :param url_path: The url_path of this UpdateHealthMonitorOption.
        :type: str
        """
        self._url_path = url_path

    @property
    def type(self):
        """Gets the type of this UpdateHealthMonitorOption.

        Description: health check type. Values: TCP, UDP_CONNECT, HTTP, HTTPS, PING. Constraint: 1. If the pool's protocol is QUIC, then type can only be UDP.

        :return: The type of this UpdateHealthMonitorOption.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this UpdateHealthMonitorOption.

        Description: health check type. Values: TCP, UDP_CONNECT, HTTP, HTTPS, PING. Constraint: 1. If the pool's protocol is QUIC, then type can only be UDP.

        :param type: The type of this UpdateHealthMonitorOption.
        :type: str
        """
        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateHealthMonitorOption):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
__init__
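# A small usage sketch (not part of the generated model above; the field values
# are illustrative only): build an update payload and inspect its dict form.
option = UpdateHealthMonitorOption(
    delay=5,
    timeout=3,              # recommended to be smaller than delay
    max_retries=3,
    type="HTTP",
    url_path="/healthz",
    expected_codes="200-204",
)
print(option.to_str())  # pretty-printed dict; unset attributes stay None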
defense.py
"""Implementation of sample defense. This defense loads inception resnet v2 checkpoint and classifies all images using loaded checkpoint. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from scipy.misc import imread import tensorflow as tf import inception_resnet_v2 slim = tf.contrib.slim tf.flags.DEFINE_string( 'master', '', 'The address of the TensorFlow master to use.')
    'checkpoint_path', '', 'Path to checkpoint for inception network.')

tf.flags.DEFINE_string(
    'input_dir', '', 'Input directory with images.')

tf.flags.DEFINE_string(
    'output_file', '', 'Output file to save labels.')

tf.flags.DEFINE_integer(
    'image_width', 299, 'Width of each input image.')

tf.flags.DEFINE_integer(
    'image_height', 299, 'Height of each input image.')

tf.flags.DEFINE_integer(
    'batch_size', 16, 'How many images to process at one time.')

FLAGS = tf.flags.FLAGS


def load_images(input_dir, batch_shape):
  """Read png images from input directory in batches.

  Args:
    input_dir: input directory
    batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]

  Yields:
    filenames: list of file names, without path, of each image
      Length of this list could be less than batch_size; in this case only
      the first few images of the result are elements of the minibatch.
    images: array with all images from this batch
  """
  images = np.zeros(batch_shape)
  filenames = []
  idx = 0
  batch_size = batch_shape[0]
  for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
    with tf.gfile.Open(filepath) as f:
      image = imread(f, mode='RGB').astype(np.float) / 255.0
    # Images for inception classifier are normalized to be in [-1, 1] interval.
    images[idx, :, :, :] = image * 2.0 - 1.0
    filenames.append(os.path.basename(filepath))
    idx += 1
    if idx == batch_size:
      yield filenames, images
      filenames = []
      images = np.zeros(batch_shape)
      idx = 0
  if idx > 0:
    yield filenames, images


def main(_):
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
  num_classes = 1001

  tf.logging.set_verbosity(tf.logging.INFO)

  with tf.Graph().as_default():
    # Prepare graph
    x_input = tf.placeholder(tf.float32, shape=batch_shape)

    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
      _, end_points = inception_resnet_v2.inception_resnet_v2(
          x_input, num_classes=num_classes, is_training=False)

    predicted_labels = tf.argmax(end_points['Predictions'], 1)

    # Run computation
    saver = tf.train.Saver(slim.get_model_variables())
    session_creator = tf.train.ChiefSessionCreator(
        scaffold=tf.train.Scaffold(saver=saver),
        checkpoint_filename_with_path=FLAGS.checkpoint_path,
        master=FLAGS.master)

    with tf.train.MonitoredSession(session_creator=session_creator) as sess:
      with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filenames, images in load_images(FLAGS.input_dir, batch_shape):
          labels = sess.run(predicted_labels, feed_dict={x_input: images})
          for filename, label in zip(filenames, labels):
            out_file.write('{0},{1}\n'.format(filename, label))


if __name__ == '__main__':
  tf.app.run()
tf.flags.DEFINE_string(
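# A self-contained sketch of the batching pattern used by load_images above
# (a toy version with no TF or image IO, showing only the final
# partial-batch behavior):
import numpy as np

def toy_batches(n_items, batch_size):
    batch, idx = np.zeros(batch_size), 0
    for i in range(n_items):
        batch[idx] = i
        idx += 1
        if idx == batch_size:
            yield batch
            batch, idx = np.zeros(batch_size), 0
    if idx > 0:
        yield batch  # final partial batch, zero-padded to batch_size

# e.g. list(toy_batches(5, 2)) yields three arrays; the last is [4., 0.].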
ExplanationOfBenefit_SubDetail.rs
#![allow(unused_imports, non_camel_case_types)] use crate::models::r4b::CodeableConcept::CodeableConcept; use crate::models::r4b::Element::Element; use crate::models::r4b::ExplanationOfBenefit_Adjudication::ExplanationOfBenefit_Adjudication; use crate::models::r4b::Extension::Extension; use crate::models::r4b::Money::Money; use crate::models::r4b::Quantity::Quantity; use crate::models::r4b::Reference::Reference; use serde_json::json; use serde_json::value::Value; use std::borrow::Cow; /// This resource provides: the claim details; adjudication details from the /// processing of a Claim; and optionally account balance information, for informing /// the subscriber of the benefits provided. #[derive(Debug)] pub struct ExplanationOfBenefit_SubDetail<'a> { pub(crate) value: Cow<'a, Value>, } impl ExplanationOfBenefit_SubDetail<'_> { pub fn new(value: &Value) -> ExplanationOfBenefit_SubDetail { ExplanationOfBenefit_SubDetail { value: Cow::Borrowed(value), } } pub fn to_json(&self) -> Value { (*self.value).clone() } /// Extensions for factor pub fn _factor(&self) -> Option<Element> { if let Some(val) = self.value.get("_factor") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// Extensions for noteNumber pub fn _note_number(&self) -> Option<Vec<Element>> { if let Some(Value::Array(val)) = self.value.get("_noteNumber") { return Some( val.into_iter() .map(|e| Element { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// Extensions for sequence pub fn _sequence(&self) -> Option<Element> { if let Some(val) = self.value.get("_sequence") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// The adjudication results. pub fn adjudication(&self) -> Option<Vec<ExplanationOfBenefit_Adjudication>> { if let Some(Value::Array(val)) = self.value.get("adjudication") { return Some( val.into_iter() .map(|e| ExplanationOfBenefit_Adjudication { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// Code to identify the general type of benefits under which products and services /// are provided. pub fn category(&self) -> Option<CodeableConcept> { if let Some(val) = self.value.get("category") { return Some(CodeableConcept { value: Cow::Borrowed(val), }); } return None; } /// May be used to represent additional information that is not part of the basic /// definition of the element. To make the use of extensions safe and manageable, /// there is a strict set of governance applied to the definition and use of /// extensions. Though any implementer can define an extension, there is a set of /// requirements that SHALL be met as part of the definition of the extension. pub fn extension(&self) -> Option<Vec<Extension>> { if let Some(Value::Array(val)) = self.value.get("extension") { return Some( val.into_iter() .map(|e| Extension { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// A real number that represents a multiplier used in determining the overall value /// of services delivered and/or goods received. The concept of a Factor allows for a /// discount or surcharge multiplier to be applied to a monetary amount. pub fn factor(&self) -> Option<f64> { if let Some(val) = self.value.get("factor") { return Some(val.as_f64().unwrap()); } return None; } /// Unique id for the element within a resource (for internal references). This may be /// any string value that does not contain spaces. 
pub fn id(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("id") { return Some(string); } return None; } /// Item typification or modifiers codes to convey additional context for the product /// or service. pub fn modifier(&self) -> Option<Vec<CodeableConcept>> { if let Some(Value::Array(val)) = self.value.get("modifier") { return Some( val.into_iter() .map(|e| CodeableConcept { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// May be used to represent additional information that is not part of the basic /// definition of the element and that modifies the understanding of the element /// in which it is contained and/or the understanding of the containing element's /// descendants. Usually modifier elements provide negation or qualification. To make /// the use of extensions safe and manageable, there is a strict set of governance /// applied to the definition and use of extensions. Though any implementer can define /// an extension, there is a set of requirements that SHALL be met as part of the /// definition of the extension. Applications processing a resource are required to /// check for modifier extensions. Modifier extensions SHALL NOT change the meaning /// of any elements on Resource or DomainResource (including cannot change the meaning /// of modifierExtension itself). pub fn modifier_extension(&self) -> Option<Vec<Extension>> { if let Some(Value::Array(val)) = self.value.get("modifierExtension") { return Some( val.into_iter() .map(|e| Extension { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// The quantity times the unit price for an additional service or product or charge. pub fn net(&self) -> Option<Money> { if let Some(val) = self.value.get("net") { return Some(Money { value: Cow::Borrowed(val), }); } return None; } /// The numbers associated with notes below which apply to the adjudication of this /// item. pub fn note_number(&self) -> Option<Vec<i64>> { if let Some(Value::Array(val)) = self.value.get("noteNumber") { return Some( val.into_iter() .map(|e| e.as_i64().unwrap()) .collect::<Vec<_>>(), ); } return None; } /// When the value is a group code then this item collects a set of related claim /// details, otherwise this contains the product, service, drug or other billing code /// for the item. pub fn product_or_service(&self) -> CodeableConcept { CodeableConcept { value: Cow::Borrowed(&self.value["productOrService"]), } } /// Identifies the program under which this may be recovered. pub fn program_code(&self) -> Option<Vec<CodeableConcept>> { if let Some(Value::Array(val)) = self.value.get("programCode") { return Some( val.into_iter() .map(|e| CodeableConcept { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// The number of repetitions of a service or product. pub fn quantity(&self) -> Option<Quantity> { if let Some(val) = self.value.get("quantity") { return Some(Quantity { value: Cow::Borrowed(val), }); } return None; } /// The type of revenue or cost center providing the product and/or service. pub fn revenue(&self) -> Option<CodeableConcept> { if let Some(val) = self.value.get("revenue") { return Some(CodeableConcept { value: Cow::Borrowed(val), }); } return None; } /// A claim detail line. Either a simple (a product or service) or a 'group' of sub- /// details which are simple items. 
pub fn sequence(&self) -> Option<i64> { if let Some(val) = self.value.get("sequence") { return Some(val.as_i64().unwrap()); } return None; } /// Unique Device Identifiers associated with this line item. pub fn udi(&self) -> Option<Vec<Reference>> { if let Some(Value::Array(val)) = self.value.get("udi") { return Some( val.into_iter() .map(|e| Reference { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// If the item is not a group then this is the fee for the product or service, /// otherwise this is the total of the fees for the details of the group. pub fn unit_price(&self) -> Option<Money> { if let Some(val) = self.value.get("unitPrice") { return Some(Money { value: Cow::Borrowed(val), }); } return None; } pub fn validate(&self) -> bool { if let Some(_val) = self._factor() { if !_val.validate() { return false; } } if let Some(_val) = self._note_number() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self._sequence() { if !_val.validate() { return false; } } if let Some(_val) = self.adjudication() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.category() { if !_val.validate() { return false; } } if let Some(_val) = self.extension() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.factor() {} if let Some(_val) = self.id() {} if let Some(_val) = self.modifier() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.modifier_extension() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.net() { if !_val.validate() { return false; } } if let Some(_val) = self.note_number() { _val.into_iter().for_each(|_e| {}); } if !self.product_or_service().validate() { return false; } if let Some(_val) = self.program_code() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.quantity() { if !_val.validate() { return false; } } if let Some(_val) = self.revenue() { if !_val.validate() { return false; } } if let Some(_val) = self.sequence() {} if let Some(_val) = self.udi() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.unit_price() { if !_val.validate() { return false; } } return true; } } #[derive(Debug)] pub struct ExplanationOfBenefit_SubDetailBuilder { pub(crate) value: Value,
impl ExplanationOfBenefit_SubDetailBuilder { pub fn build(&self) -> ExplanationOfBenefit_SubDetail { ExplanationOfBenefit_SubDetail { value: Cow::Owned(self.value.clone()), } } pub fn with(existing: ExplanationOfBenefit_SubDetail) -> ExplanationOfBenefit_SubDetailBuilder { ExplanationOfBenefit_SubDetailBuilder { value: (*existing.value).clone(), } } pub fn new(product_or_service: CodeableConcept) -> ExplanationOfBenefit_SubDetailBuilder { let mut __value: Value = json!({}); __value["productOrService"] = json!(product_or_service.value); return ExplanationOfBenefit_SubDetailBuilder { value: __value }; } pub fn _factor<'a>( &'a mut self, val: Element, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["_factor"] = json!(val.value); return self; } pub fn _note_number<'a>( &'a mut self, val: Vec<Element>, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["_noteNumber"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn _sequence<'a>( &'a mut self, val: Element, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["_sequence"] = json!(val.value); return self; } pub fn adjudication<'a>( &'a mut self, val: Vec<ExplanationOfBenefit_Adjudication>, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["adjudication"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn category<'a>( &'a mut self, val: CodeableConcept, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["category"] = json!(val.value); return self; } pub fn extension<'a>( &'a mut self, val: Vec<Extension>, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn factor<'a>(&'a mut self, val: f64) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["factor"] = json!(val); return self; } pub fn id<'a>(&'a mut self, val: &str) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["id"] = json!(val); return self; } pub fn modifier<'a>( &'a mut self, val: Vec<CodeableConcept>, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["modifier"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn modifier_extension<'a>( &'a mut self, val: Vec<Extension>, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["modifierExtension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn net<'a>(&'a mut self, val: Money) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["net"] = json!(val.value); return self; } pub fn note_number<'a>( &'a mut self, val: Vec<i64>, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["noteNumber"] = json!(val); return self; } pub fn program_code<'a>( &'a mut self, val: Vec<CodeableConcept>, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["programCode"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn quantity<'a>( &'a mut self, val: Quantity, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["quantity"] = json!(val.value); return self; } pub fn revenue<'a>( &'a mut self, val: CodeableConcept, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["revenue"] = json!(val.value); return self; } pub fn sequence<'a>(&'a mut self, val: i64) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["sequence"] = json!(val); return self; } pub fn udi<'a>( &'a mut self, val: Vec<Reference>, ) -> &'a 
mut ExplanationOfBenefit_SubDetailBuilder { self.value["udi"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn unit_price<'a>( &'a mut self, val: Money, ) -> &'a mut ExplanationOfBenefit_SubDetailBuilder { self.value["unitPrice"] = json!(val.value); return self; } }
}
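// A small round-trip sketch for the wrapper and builder above (assumptions:
// the JSON fragment is an illustrative, not spec-derived, FHIR subDetail shape).
fn sub_detail_demo() {
    let raw = serde_json::json!({
        "sequence": 1,
        "productOrService": { "text": "example service" }
    });

    // Wrap a borrowed JSON value and read typed accessors from it.
    let sub_detail = ExplanationOfBenefit_SubDetail::new(&raw);
    assert_eq!(sub_detail.sequence(), Some(1));

    // Rebuild through the builder, reusing the required productOrService.
    let rebuilt = ExplanationOfBenefit_SubDetailBuilder::new(sub_detail.product_or_service())
        .sequence(1)
        .build();
    assert_eq!(rebuilt.to_json()["sequence"], 1);
}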
chat.test.ts
import crypto from 'crypto'
import fs from 'fs'
import Bot from '../lib'
import config from './tests.config'
import {timeout} from '../lib/utils'
import {pollFor} from './test-utils'
import {promisify} from 'util'
import {TopicType, ChatChannel, MsgSummary, BotCommandsAdvertisementTyp} from '../lib/types/chat1'
import {OnMessage, ReadResult} from '../lib/chat-client'

// HACK:
// typescript does not believe certain scenarios could be mutated
// see for example: https://github.com/Microsoft/TypeScript/issues/12176
// This is an easy solution for the timeout check
const coerceMsgSummary = (m: MsgSummary | null): MsgSummary => (m as unknown) as MsgSummary

test('Chat methods with an uninitialized bot', (): void => {
  const alice1 = new Bot()
  const channel = {name: `${config.bots.alice1.username},${config.bots.bob1.username}`}
  const message = {body: 'Testing!'}

  // @ts-ignore because it intentionally has bad arguments
  expect(alice1.chat.list()).rejects.toThrowError()
  // @ts-ignore because it intentionally has bad arguments
  expect(alice1.chat.read()).rejects.toThrowError()
  // @ts-ignore because it intentionally has bad arguments
  expect(alice1.chat.send(channel, message)).rejects.toThrowError()
  // @ts-ignore because it intentionally has bad arguments
  expect(alice1.chat.delete(channel, 314)).rejects.toThrowError()
})

describe('Chat Methods', (): void => {
  const alice1 = new Bot()
  const alice2 = new Bot()
  const bob = new Bot()

  const channel: ChatChannel = {name: `${config.bots.alice1.username},${config.bots.bob1.username}`}
  const teamChannel: ChatChannel = {
    name: config.teams.acme.teamname,
    public: false,
    topicType: 'chat',
    membersType: 'team',
    topicName: 'general',
  }

  const message = {body: 'Test message!'}
  const invalidChannel = {name: 'kbot,'}
  const invalidMessage = {bdy: 'blah'}

  const channelMatcher = expect.objectContaining({
    name: expect.any(String),
    membersType: expect.any(String),
  })
  const conversationMatcher = expect.objectContaining({
    id: expect.any(String),
    channel: channelMatcher,
    unread: expect.any(Boolean),
    activeAt: expect.any(Number),
    activeAtMs: expect.any(Number),
    memberStatus: expect.any(String),
  })
  const messageMatcher = expect.objectContaining({
    id: expect.any(Number),
    channel: channelMatcher,
    sender: expect.objectContaining({
      deviceId: expect.any(String),
      uid: expect.any(String),
    }),
    sentAt: expect.any(Number),
    sentAtMs: expect.any(Number),
    content: expect.objectContaining({
      type: expect.any(String),
    }),
    unread: expect.any(Boolean),
  })

  beforeAll(
    async (): Promise<void> => {
      await alice1.init(config.bots.alice1.username, config.bots.alice1.paperkey)
      await alice2.init(config.bots.alice2.username, config.bots.alice2.paperkey)
      await bob.init(config.bots.bob1.username, config.bots.bob1.paperkey)
    }
  )
  afterAll(
    async (): Promise<void> => {
      await alice1.deinit()
      await alice2.deinit()
      await bob.deinit()
    }
  )

  it('watchForNewConversation', async (): Promise<void> => {
    try {
      await alice1.team.removeMember({team: config.teams.alicesPlayground.teamname, username: config.bots.bob1.username})
    } catch (err) {
      console.log('Caught err on removing existing membership')
    } finally {
      // We seem to need to track this because otherwise it'll pick up ones in later tests
      let seenOneYet = false
      const toWait = new Promise(async (resolve, reject) => {
        await bob.chat.watchForNewConversation(
          conv => {
            if (!seenOneYet) {
              seenOneYet = true
              expect(conv.channel.name).toBe(config.teams.alicesPlayground.teamname)
              expect(conv.channel.topicName).toBe('general')
              resolve()
            }
          },
          err => reject(err)
        )
      })
      await
alice1.team.addMembers({ team: config.teams.alicesPlayground.teamname, usernames: [{username: config.bots.bob1.username, role: 'writer'}], }) await toWait } }) describe('Chat list', (): void => { it('Returns all chat conversations in an array', async (): Promise<void> => { const conversations = await alice1.chat.list() expect(Array.isArray(conversations)).toBe(true) for (const conversation of conversations) { expect(conversation).toEqual(conversationMatcher) } })
for (const conversation of conversations) { expect(conversation).toHaveProperty('unread', true) } }) it('Shows only messages of a specific topic type if given the option', async (): Promise<void> => { const conversations = await alice1.chat.list({topicType: TopicType.DEV}) for (const conversation of conversations) { expect(conversation.channel).toHaveProperty('topicType', 'dev') } }) }) describe('Chat read', (): void => { it('Retrieves all messages in a conversation', async (): Promise<void> => { const result = await alice1.chat.read(channel) expect(Array.isArray(result.messages)).toBe(true) for (const message of result.messages) { expect(message).toEqual(messageMatcher) } }) it('Shows only unread messages if given the option', async (): Promise<void> => { await bob.chat.send(channel, message) const result = await alice1.chat.read(channel, {unreadOnly: true}) expect(Array.isArray(result.messages)).toBe(true) for (const message of result.messages) { expect(message).toHaveProperty('unread', true) } }) it("Doesn't mark messages read on peek", async (): Promise<void> => { // No peeking: message should be unread on first read, and read on subsequent reads let result = await alice1.chat.read(channel) await bob.chat.send(channel, message) result = await alice1.chat.read(channel) expect(result.messages[0]).toHaveProperty('unread', true) result = await alice1.chat.read(channel) expect(result.messages[0]).toHaveProperty('unread', false) // Now let's peek. Messages should remain unread on subsequent reads. await bob.chat.send(channel, message) result = await alice1.chat.read(channel, {peek: true}) expect(result.messages[0]).toHaveProperty('unread', true) result = await alice1.chat.read(channel) expect(result.messages[0]).toHaveProperty('unread', true) }) it('Allows a user to properly paginate over the messages', async (): Promise<void> => { // Mark all messages as read await alice1.chat.read(channel) // Prepare some new messages const expectedCount = 10 for (let i = 0; i < expectedCount; i++) { await bob.chat.send(channel, message) } // Run the pagination with peek and unreadOnly enabled, expecting 10 msgs let totalCount = 0 let lastPagination while (true) { const result: ReadResult = await alice1.chat.read(channel, { peek: true, unreadOnly: true, pagination: { num: 1, next: lastPagination ? 
lastPagination.next : undefined, }, }) totalCount += result.messages.length if (totalCount >= expectedCount) { break } lastPagination = result.pagination } expect(totalCount).toEqual(10) }) it('Throws an error if given an invalid channel', async (): Promise<void> => { expect(alice1.chat.read(invalidChannel)).rejects.toThrowError() }) }) describe('Chat send', (): void => { it('Sends a message to a certain channel and returns an empty promise', async (): Promise<void> => { await alice1.chat.send(channel, message) const result = await alice1.chat.read(channel, {peek: true}) expect(result.messages[0].sender.username).toEqual(alice1.myInfo()?.username) if (result.messages[0].content.type === 'text') { expect(result.messages[0].content.text?.body).toEqual(message.body) } else { expect(false).toBe(true) } }) it('Throws an error if given an invalid channel', async (): Promise<void> => { expect(alice1.chat.send(invalidChannel, message)).rejects.toThrowError() }) it('Throws an error if given an invalid message', async (): Promise<void> => { // @ts-ignore intentionally bad formatted message expect(alice1.chat.send(channel, invalidMessage)).rejects.toThrowError() }) }) describe('Gets messages in correct channels', (): void => { it(`Can act read/post in different channels concurrently`, async (): Promise<void> => { const channels: ChatChannel[] = [ { name: config.teams.acme.teamname, topicName: 'general', membersType: 'team', }, { name: config.teams.acme.teamname, topicName: 'singularitarians', membersType: 'team', }, {name: `${config.bots.alice1.username},${config.bots.bob1.username}`}, ] const okChecks: boolean[] = [] for (const channel of channels) { if (channel.topicName && channel.topicName !== 'general') { try { await alice1.chat.createChannel(channel) } catch (err) { /* may already be made */ } await bob.chat.joinChannel(channel) } } await timeout(1000) // concurrently watch and send to all of them for (const i in channels) { const channel = channels[i] bob.chat.watchChannelForNewMessages(channel, (message): void => { if (message.content.type !== 'text') { throw new Error('Expected text type') } if (message.content.text?.body === `c${i} test`) { if (okChecks[i]) { throw new Error('Uh oh, duplicate! ' + JSON.stringify(message)) } okChecks[i] = true } else { throw new Error('Got bad message: ' + JSON.stringify(message)) } }) alice1.chat.send(channel, {body: `c${i} test`}) } const allChecksOk = (): boolean => { for (const i in channels) { if (!okChecks[i]) { return false } } return true } await pollFor(allChecksOk) expect(allChecksOk()).toBe(true) }) it(`Can read and post even if own username missing from a DM channel name`, async (): Promise<void> => { const channelAlice = {name: config.bots.bob1.username} const channelBob = {name: config.bots.alice1.username} const body = 'Dearest Bob, how are you?' 
let incoming: MsgSummary | null = null const watcher: OnMessage = (message: MsgSummary): void => { incoming = message } bob.chat.watchChannelForNewMessages(channelBob, watcher) await timeout(500) await alice1.chat.send(channelAlice, {body}) await timeout(500) const inc = coerceMsgSummary(incoming) if (inc.content?.type !== 'text') { throw new Error('got a bad message') } else { expect(inc.content?.text?.body).toBe(body) } }) it(`Can read and post with usernames in any order`, async (): Promise<void> => { const channel1 = {name: `${config.bots.alice1.username},${config.bots.bob1.username}`} const channel2 = {name: `${config.bots.bob1.username},${config.bots.alice1.username}`} const channel3 = {name: `${channel2.name},${config.bots.charlie1.username}`} const body = 'Total protonic reversal. That would be bad.' let receipts = 0 const bobOnMessage = (message: MsgSummary): void => { if (message.content.type === 'text' && message.content.text?.body === body) { receipts++ } } bob.chat.watchChannelForNewMessages(channel1, bobOnMessage) bob.chat.watchChannelForNewMessages(channel2, bobOnMessage) await timeout(500) await alice1.chat.send(channel1, {body}) await timeout(500) expect(receipts).toBe(2) await alice1.chat.send(channel2, {body}) await timeout(500) expect(receipts).toBe(4) // channel 3 should not be included by bob since it's not watched await alice1.chat.send(channel3, {body}) await timeout(500) expect(receipts).toBe(4) }) }) describe('Chat createChannel, joinChannel, watchChannel, and leaveChannel', (): void => { it('Successfully performs the complete flow', async (): Promise<void> => { const teamChannel: ChatChannel = { name: config.teams.acme.teamname, public: false, topicType: 'chat', membersType: 'team', topicName: `sub-${Date.now()}`, } const generalChannel: ChatChannel = { name: config.teams.acme.teamname, public: false, topicType: 'chat', membersType: 'team', topicName: 'general', } const message = {body: `And she's buuuuuuy..ing a stairway....to heav-un.`} await alice1.chat.createChannel(teamChannel) await bob.chat.joinChannel(teamChannel) const read1 = await alice1.chat.read(teamChannel, { pagination: { num: 1, }, }) expect(read1.messages[0].content.type).toEqual('join') expect(read1.messages[0].sender.username).toEqual(config.bots.bob1.username) let bobMessageCount = 0 const bobOnMessage = async (): Promise<void> => { bobMessageCount++ } bob.chat.watchChannelForNewMessages(teamChannel, bobOnMessage) bob.chat.watchChannelForNewMessages(generalChannel, bobOnMessage) await timeout(500) await alice1.chat.send(generalChannel, message) await timeout(500) expect(bobMessageCount).toBe(1) /* only one of the watchers should've picked this up */ await alice1.chat.send(teamChannel, message) await timeout(500) expect(bobMessageCount).toBe(2) await bob.chat.leaveChannel(teamChannel) const read2 = await alice1.chat.read(teamChannel, { pagination: { num: 1, }, }) expect(read2.messages[0].content.type).toEqual('leave') expect(read2.messages[0].sender.username).toEqual(config.bots.bob1.username) await timeout(500) await alice1.chat.send(teamChannel, message) await timeout(500) expect(bobMessageCount).toBe(2) /* confirm bob is no longer listening */ }) }) describe('Chat react', (): void => { it('Allows a user to react to a valid message', async (): Promise<void> => { await alice1.chat.send(channel, message) let result = await alice1.chat.read(channel, {peek: true}) const messageToReactTo = result.messages[0] await bob.chat.react(channel, messageToReactTo.id, ':poop:') result = await 
alice1.chat.read(channel, {peek: true}) const reaction = result.messages[0] expect(reaction.id).toBe(messageToReactTo.id + 1) expect(reaction.content.type).toBe('reaction') expect(result.messages[1]?.reactions?.reactions?.poop?.users).toHaveProperty(config.bots.bob1.username) }) // it('Throws an error if given an invalid emoji', async () => { // await alice1.chat.send(channel, message) // const result = await alice1.chat.read(channel, {peek: true}) // const messageToReactTo = result.messages[0] // expect(bob.chat.react(channel, messageToReactTo.id, 'blah')).rejects.toThrowError() // }) // it("Throws an error if it cannot react to a message (e.g., it's not a reactable message type") }) describe('Chat attach', (): void => { const attachmentLocation = '/tmp/kb-attachment.txt' beforeAll( async (): Promise<void> => { await promisify(fs.writeFile)(attachmentLocation, 'This is a test file!') } ) afterAll( async (): Promise<void> => { await promisify(fs.unlink)(attachmentLocation) } ) it('Attaches and sends a file on the filesystem', async (): Promise<void> => { await alice1.chat.attach(channel, attachmentLocation) const result = await alice1.chat.read(channel) expect(result.messages[0].sender.username).toEqual(alice1.myInfo()?.username) expect(result.messages[0].content.type).toBe('attachment') expect(result.messages[0].content).toHaveProperty('attachment') }) it('Throws an error if given an invalid channel', async () => { expect(alice1.chat.attach(invalidChannel, attachmentLocation)).rejects.toThrowError() }) it('Throws an error if the file does not exist', async () => { expect(alice1.chat.attach(channel, '/fake-attachment.png')).rejects.toThrowError() }) }) describe('Chat download', (): void => { const downloadLocation = '/tmp/kb-downloaded-file' it('Downloads a file and saves it on the filesystem', async (): Promise<void> => { // Send a file const attachmentLocation = '/tmp/kb-attachment.txt' const attachmentContent = 'Test attachment file' await promisify(fs.writeFile)(attachmentLocation, attachmentContent) await alice1.chat.attach(channel, attachmentLocation) // Read the file const result = await alice1.chat.read(channel) await alice1.chat.download(channel, result.messages[0].id, downloadLocation) const downloadContents = await promisify(fs.readFile)(downloadLocation) expect(downloadContents.toString()).toBe(attachmentContent) // Delete the created files await promisify(fs.unlink)(attachmentLocation) await promisify(fs.unlink)(downloadLocation) }) it('Throws an error if given an invalid channel', async (): Promise<void> => { const result = await alice1.chat.read(channel) const attachments = result.messages.filter(message => message.content.type === 'attachment') expect(alice1.chat.download(invalidChannel, attachments[0].id, downloadLocation)).rejects.toThrowError() }) it('Throws an error if given a non-attachment message', async (): Promise<void> => { await alice1.chat.send(channel, message) const result = await alice1.chat.read(channel) expect(alice1.chat.download(channel, result.messages[0].id, '/tmp/attachment')).rejects.toThrowError() }) }) describe('Chat delete', (): void => { it('Deletes a message to a certain channel and returns an empty promise', async (): Promise<void> => { await alice1.chat.send(channel, message) // Send a message const result = await alice1.chat.read(channel, { peek: true, }) expect(result.messages[0].sender.username).toEqual(alice1.myInfo()?.username) if (result.messages[0].content.type !== 'text') { throw new Error('Expected text type but got something else') } 
else { expect(result.messages[0].content.text?.body).toEqual(message.body) } const {id} = result.messages[0] await alice1.chat.delete(channel, id) // Send a message const newResult = await alice1.chat.read(channel, { peek: true, }) expect(newResult.messages[0].id).toEqual(id + 1) if (newResult.messages[0].content.type !== 'delete') { throw new Error('expected delete message type') } else { expect(newResult.messages[0].content.delete?.messageIDs).toContain(id) expect(newResult.messages[0].content.delete?.messageIDs).toHaveLength(1) } expect(newResult.messages[1].id).toEqual(id - 1) }) it('Throws an error if given an invalid channel', async (): Promise<void> => { await alice1.chat.send(channel, message) const result = await alice1.chat.read(channel, { peek: true, }) const {id} = result.messages[0] expect(alice1.chat.delete(invalidChannel, id)).rejects.toThrowError() }) it('Throws an error if given an invalid id', async (): Promise<void> => { expect(alice1.chat.delete(channel, -1)).rejects.toThrowError() }) /* TODO: currently in DM's both parties are considered admins of the chat and technically have the power to delete messages from either side, a feature which isn't currently exposed in the GUI. we will likely turn this off in the form of access control on the server, and then this test will pass. it('Throws an error if it cannot delete the message (e.g., someone else wrote it)', async () => { await bob.chat.send(channel, message) const result = await alice1.chat.read(channel, { peek: true, }) const {id} = result.messages[0] expect(alice1.chat.delete(channel, id)).rejects.toThrowError() }) */ }) describe('Command advertisements', (): void => { it('Should be able to clear, publish and then lookup', async (): Promise<void> => { await alice1.chat.clearCommands() await alice1.chat.advertiseCommands({ advertisements: [ { type: BotCommandsAdvertisementTyp.PUBLIC, commands: [ { name: '!helloworld', description: 'sample description', usage: 'test', }, ], }, ], }) const listBeforeClear = await bob.chat.listCommands({ channel: channel, }) expect(listBeforeClear.commands).toContainEqual({ name: '!helloworld', description: 'sample description', usage: 'test', username: alice1.myInfo()?.username, }) await alice1.chat.clearCommands() const listAfterClear = await bob.chat.listCommands({ channel: channel, }) expect(listAfterClear.commands.length).toBe(0) }) }) describe('watchChannelForNewMessages', (): void => { it('Can have bots say hello to each other in a team', async (): Promise<void> => { let ALICE_IS_SATISFIED = false let BOB_IS_SATISFIED = false alice1.chat.watchChannelForNewMessages(teamChannel, (message): void => { if (message.content.type === 'text' && message.content.text?.body === 'hello alice1') { ALICE_IS_SATISFIED = true } }) bob.chat.watchChannelForNewMessages(teamChannel, (message): void => { if (message.content.type === 'text' && message.content.text?.body === 'hello bob') { BOB_IS_SATISFIED = true } }) await alice1.chat.send(teamChannel, {body: 'hello bob'}) await bob.chat.send(teamChannel, {body: 'hello alice1'}) await pollFor((): boolean => ALICE_IS_SATISFIED && BOB_IS_SATISFIED) expect(ALICE_IS_SATISFIED).toBe(true) expect(BOB_IS_SATISFIED).toBe(true) }) it("Doesn't pick up its own messages from the same device", async (): Promise<void> => { const messageText = 'Ever thus to deadbeats, Lebowski' let noticedMessages = 0 alice1.chat.watchChannelForNewMessages(teamChannel, (message): void => { if (message.content.type === 'text' && message.content.text?.body === messageText) { 
noticedMessages++ } }) await alice1.chat.send(teamChannel, {body: messageText}) await timeout(3000) expect(noticedMessages).toBe(0) }) }) describe('watchAllChannelsForNewMessages', (): void => { const testTwoBotsCounting = async (bot1: Bot, bot2: Bot): Promise<void> => { const stopAt = 10 const convoCode = crypto.randomBytes(8).toString('hex') const directChannel = {name: `${bot1.myInfo()?.username},${bot2.myInfo()?.username}`} let totalMessagesSeen = 0 let highestReached = 0 const onMessageForBot = (bot: Bot): OnMessage => { const onMessage = async (message: MsgSummary): Promise<void> => { if (message.content.type === 'text') { const body = message.content.text?.body ?? '' if (body.indexOf(convoCode) !== -1) { totalMessagesSeen++ const num = parseInt(body.replace(convoCode, '').trim()) highestReached = Math.max(num, highestReached) if (num < stopAt) { const reply = {body: `${convoCode} ${num + 1}`} await bot.chat.send(message.channel, reply) } } } } return onMessage } bot1.chat.watchAllChannelsForNewMessages(onMessageForBot(bot1)) bot2.chat.watchAllChannelsForNewMessages(onMessageForBot(bot2)) const message = {body: `${convoCode} 1`} await bot1.chat.send(directChannel, message) await pollFor((): boolean => highestReached === stopAt) expect(totalMessagesSeen).toBe(stopAt) } it('can have 2 users count together', async (): Promise<void> => testTwoBotsCounting(alice1, bob)) it('can have 1 user count across 2 devices', async (): Promise<void> => testTwoBotsCounting(alice1, alice2)) }) })
it('Lists only unread conversations if given the option', async (): Promise<void> => { await bob.chat.send(channel, message) await timeout(500) const conversations = await alice1.chat.list({unreadOnly: true})
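Note: the tests above lean on the pollFor helper imported from ./test-utils, whose implementation is not shown in this file. A minimal sketch of what such a helper might look like; the interval and timeout values are assumptions, not the real test-utils code:

// Hypothetical stand-in for pollFor: resolves once `check` returns true,
// rejects if it stays false past the deadline. Numbers are guesses.
export const pollFor = (check: () => boolean, intervalMs: number = 100, timeoutMs: number = 10000): Promise<void> =>
  new Promise((resolve, reject) => {
    const startedAt = Date.now()
    const tick = (): void => {
      if (check()) {
        resolve()
      } else if (Date.now() - startedAt > timeoutMs) {
        reject(new Error('pollFor timed out'))
      } else {
        setTimeout(tick, intervalMs)
      }
    }
    tick()
  })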
mockgen_test.go
package main import ( "reflect" "testing" "github.com/golang/mock/mockgen/model" ) func
(t *testing.T) { for _, testCase := range []struct { name string method *model.Method expected []string }{ { name: "NamedArg", method: &model.Method{ In: []*model.Parameter{ { Name: "firstArg", Type: &model.NamedType{Type: "int"}, }, { Name: "secondArg", Type: &model.NamedType{Type: "string"}, }, }, }, expected: []string{"firstArg", "secondArg"}, }, { name: "NotNamedArg", method: &model.Method{ In: []*model.Parameter{ { Name: "", Type: &model.NamedType{Type: "int"}, }, { Name: "", Type: &model.NamedType{Type: "string"}, }, }, }, expected: []string{"arg0", "arg1"}, }, { name: "MixedNameArg", method: &model.Method{ In: []*model.Parameter{ { Name: "firstArg", Type: &model.NamedType{Type: "int"}, }, { Name: "_", Type: &model.NamedType{Type: "string"}, }, }, }, expected: []string{"firstArg", "arg1"}, }, } { t.Run(testCase.name, func(t *testing.T) { g := generator{} result := g.getArgNames(testCase.method) if !reflect.DeepEqual(result, testCase.expected) { t.Fatalf("expected %s, got %s", testCase.expected, result) } }) } } func Test_createPackageMap(t *testing.T) { tests := []struct { name string importPath string wantPackageName string wantOK bool }{ {"golang package", "context", "context", true}, {"third party", "golang.org/x/tools/present", "present", true}, //{"modules", "rsc.io/quote/v3", "quote", true}, {"fail", "this/should/not/work", "", false}, } var importPaths []string for _, tc := range tests { importPaths = append(importPaths, tc.importPath) } packages := createPackageMap(importPaths) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gotPackageName, gotOk := packages[tt.importPath] if gotPackageName != tt.wantPackageName { t.Errorf("createPackageMap() gotPackageName = %v, wantPackageName = %v", gotPackageName, tt.wantPackageName) } if gotOk != tt.wantOK { t.Errorf("createPackageMap() gotOk = %v, wantOK = %v", gotOk, tt.wantOK) } }) } }
TestGetArgNames
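For illustration only: the naming rule that TestGetArgNames exercises — keep a parameter's declared name, and fall back to a positional argN when the name is empty or the blank identifier — sketched in TypeScript (mockgen's real getArgNames is the Go method under test above):

// Illustrative re-implementation of the rule under test: empty or "_"
// parameter names become positional fallbacks like arg0, arg1.
const argNames = (params: Array<{name: string}>): string[] =>
  params.map((p, i) => (p.name === '' || p.name === '_' ? `arg${i}` : p.name))

// argNames([{name: 'firstArg'}, {name: '_'}]) -> ['firstArg', 'arg1']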
git-repository-details.tsx
import { Common, Renderer } from "@k8slens/extensions"; import { kebabCase } from "lodash"; import React, { ReactNode } from "react"; import { GitRepository } from "../../apis/source/git-repository"; import { ExternalLink } from "../external-link"; import { Link } from "react-router-dom"; export interface FluxGitRepositoryDetailsProps extends Renderer.Component.KubeObjectDetailsProps<GitRepository> {} export class GitRepositoryDetails extends React.Component< FluxGitRepositoryDetailsProps, any > { render(): ReactNode { const { object: fluxGitRepository } = this.props; if (!fluxGitRepository) return null; return ( <div className="FluxGitRepository"> <Renderer.Component.KubeObjectMeta object={fluxGitRepository} /> <Renderer.Component.DrawerItem name="Status"> <span className={Common.Util.cssNames( "status", kebabCase(fluxGitRepository.getStatusMessage()) )} > {fluxGitRepository.getStatusMessage()} </span> </Renderer.Component.DrawerItem> <Renderer.Component.DrawerItem name="Reconciled At"> {fluxGitRepository.getReconciledAge()} ago ( {fluxGitRepository.status.artifact?.lastUpdateTime}) </Renderer.Component.DrawerItem> <Renderer.Component.DrawerItem name="URL"> <ExternalLink url={fluxGitRepository.spec.url} /> </Renderer.Component.DrawerItem> {fluxGitRepository.spec.secretRef && ( <Renderer.Component.DrawerItem name="Secret"> <Link to={fluxGitRepository.getSecretDetailUrl()}> {fluxGitRepository.spec.secretRef.name} </Link> </Renderer.Component.DrawerItem> )} <Renderer.Component.DrawerItem name="Interval"> {fluxGitRepository.spec.interval} </Renderer.Component.DrawerItem> {fluxGitRepository.spec.timeout && ( <Renderer.Component.DrawerItem name="Timeout"> {fluxGitRepository.spec.timeout} </Renderer.Component.DrawerItem> )} {fluxGitRepository.spec.ref && ( <React.Fragment> <Renderer.Component.DrawerItem name="Branch" hidden={!fluxGitRepository.spec.ref.branch} > {fluxGitRepository.spec.ref.branch} </Renderer.Component.DrawerItem> <Renderer.Component.DrawerItem name="Tag" hidden={!fluxGitRepository.spec.ref.tag} > {fluxGitRepository.spec.ref.tag} </Renderer.Component.DrawerItem> <Renderer.Component.DrawerItem name="Semver" hidden={!fluxGitRepository.spec.ref.semver} > {fluxGitRepository.spec.ref.semver} </Renderer.Component.DrawerItem> <Renderer.Component.DrawerItem name="Commit" hidden={!fluxGitRepository.spec.ref.commit} > {fluxGitRepository.spec.ref.commit} </Renderer.Component.DrawerItem> </React.Fragment> )} {fluxGitRepository.spec.verify && ( <Renderer.Component.DrawerItem name="Verify" hidden={!fluxGitRepository.spec.verify.mode} > {fluxGitRepository.spec.verify.mode} </Renderer.Component.DrawerItem> )} {fluxGitRepository.spec.ignore && (
<Renderer.Component.Input multiLine theme="round-black" className="box grow" value={fluxGitRepository.spec.ignore} /> </Renderer.Component.DrawerItem> )} {fluxGitRepository.spec.gitImplementation && ( <Renderer.Component.DrawerItem name="Git Implementation"> {fluxGitRepository.spec.gitImplementation} </Renderer.Component.DrawerItem> )} {fluxGitRepository.spec.recurseSubmodules && ( <Renderer.Component.DrawerItem name="Recurse submodules"> {String(fluxGitRepository.spec.recurseSubmodules)} </Renderer.Component.DrawerItem> )} {fluxGitRepository.spec.include && ( <Renderer.Component.DrawerItem name="Includes"> {fluxGitRepository.spec.include.map((repo, index) => { // TODO prettify return ( <Renderer.Component.Input key={index} multiLine theme="round-black" className="box grow" value={JSON.stringify(repo)} /> ); })} </Renderer.Component.DrawerItem> )} { // TODO accessFrom } </div> ); } }
<Renderer.Component.DrawerItem name="Ignore">
authorizator.go
package jwt import ( "gin-vue-admin/models" "github.com/gin-gonic/gin" ) //IAuthorizator authorization rule interface type IAuthorizator interface { HandleAuthorizator(data interface{}, c *gin.Context) bool } //AdminAuthorizator admin authorization rule type AdminAuthorizator struct { } //HandleAuthorizator handles the admin authorization rule func (*AdminAuthorizator) HandleAuthorizator(data interface{}, c *gin.Context) bool { if v, ok := data.(*models.UserRole); ok { for _, itemRole := range v.UserRoles {
rule type TestAuthorizator struct { } //HandleAuthorizator handles the test-user authorization rule func (*TestAuthorizator) HandleAuthorizator(data interface{}, c *gin.Context) bool { if v, ok := data.(*models.UserRole); ok && v.UserName == "test" { return true } return false } //AllUserAuthorizator regular-user authorization rule type AllUserAuthorizator struct { } //HandleAuthorizator handles the regular-user authorization rule func (*AllUserAuthorizator) HandleAuthorizator(data interface{}, c *gin.Context) bool { return true }
if itemRole.Value == "admin" { return true } } } return false } //TestAuthorizator test-user authorization
queue.js
'use strict'; /** * @format * @flow */ import moment from 'moment'; import updateObject from '../utils/updateObject'; import getUnique from '../utils/getUnique'; import * as types from '../actions/queue/types'; import * as entitiesTypes from '../actions/entities/types'; import {type Firebase} from '../utils/firebaseTypes'; import {type Action as AlbumAction} from './albums'; import {type Action as ArtistAction} from './artists'; import {type Action as PlayerAction} from './player'; import {type Action as TrackAction} from './tracks'; import {type Action as EntitiesAction} from './entities'; export const lastUpdated: string = moment().format('ddd, MMM D, YYYY, h:mm:ss a'); type DispatchAction = | Action | AlbumAction | ArtistAction | PlayerAction | TrackAction | EntitiesAction; type GetState = () => State; type PromiseAction = Promise<DispatchAction>; type ThunkAction = (dispatch: Dispatch, getState: GetState, firebase: Firebase) => any; type Dispatch = (action: DispatchAction | PromiseAction | ThunkAction | Array<Action>) => any; type Updates = { totalLikes?: number, changeLike?: boolean, }; type Context = { +id?: string, +name?: string, +type?: string, +displayName?: string, +position?: string | number, +total?: number, +tracks?: Array<string>, }; type QueueTrack = { +id?: ?string, +trackID?: ?string, +userID?: ?string, +totalLikes?: number, +liked?: boolean, +seconds?: ?number, +nanoseconds?: ?number, +isCurrent?: boolean, }; type Action = { +type?: string, +error?: Error, +context?: Context, +tracks?: {+[id: string]: QueueTrack}, +track?: QueueTrack, +queueID?: string, +queue?: ?Array<QueueTrack>, +contextQueue?: ?Array<string>, +unsubscribe?: () => void, +removeTrack?: boolean, +updates?: State, +item?: QueueTrack, }; type State = { +lastUpdated?: string, +userQueue?: Array<QueueTrack>, +totalUserQueue?: number, +contextQueue?: Array<string>, +fetching?: Array<string>, +liking?: Array<string>, +deleting?: Array<string>, +failed?: Array<string>, +queueing?: boolean, +unsubscribe?: ?() => any, +error?: ?Error, +context?: Context, }; export type { GetState, PromiseAction, ThunkAction, Dispatch, Updates, Context, QueueTrack, Action, State, }; /** * @callback unsubscribe */ /** * @constant * @alias singleQueueState * @type {object} * * @property {string} id=null The Brassroots id of the queue track * @property {string} trackID=null The Spotify id of the queue track * @property {string} userID=null The Brassroots id of the user who queued the track * @property {number} totalLikes=0 The total amount of likes the queue track has * @property {boolean} liked=false Whether the current user has liked the queue track * @property {number} seconds=0 The seconds when the track was added * @property {number} nanoseconds=0 The nanoseconds when the track was added * @property {boolean} isCurrent=false Whether the track is the currently playing track */ const singleState: QueueTrack = { id: null, trackID: null, userID: null, totalLikes: 0, liked: false, seconds: 0, nanoseconds: 0, isCurrent: false, }; /** * @constant * @alias queueState * @type {object} * * @property {string} lastUpdated The date/time the queue was last updated * @property {string[]} userQueue The Brassroots ids of the tracks next up in the queue * @property {number} totalUserQueue=0 The total amount of tracks in the user queue * @property {string[]} contextQueue The Spotify ids of the tracks next up in the queue from the context * @property {string[]} fetching=[] The entity types the current user is currently fetching * 
@property {string[]} liking The Brassroots ids of the tracks in the queue the current user is liking * @property {string[]} deleting The Brassroots ids of the tracks in the queue the current user is deleting * @property {string[]} failed The Brassroots ids of the tracks in the queue which experienced an error * @property {boolean} queueing=false Whether the current user is queueing a track * @property {unsubscribe} unsubscribe=null The function to invoke to unsubscribe from the queue listener * @property {Error} error=null The error related to queue actions * @property {object} context The current context of the queue * @property {string} context.id The id of the current context * @property {string} context.name The name of the current context * @property {string} context.type The type of item the current context is * @property {string} context.displayName The display name of the current context * @property {(number|string)} context.position=0 The position/cursor in the context the current track is located */ export const initialState: State = { lastUpdated, userQueue: [], totalUserQueue: 0, contextQueue: [], fetching: [], liking: [], deleting: [], failed: [], queueing: false, unsubscribe: null, error: null, context: { id: '', name: '', type: '', displayName: '', position: 0, }, }; export function queueTrack( state: QueueTrack = singleState, action: Action, ): QueueTrack { switch (action.type) { case entitiesTypes.ADD_ENTITIES: return updateObject(state, {...(action.item ? action.item : {})}); default: return state; } } /** * Updates any of the values in the queue state * * @function update * * @author Aldo Gonzalez <[email protected]> * * @param {object} state The Redux state * @param {object} action The Redux action * @param {string} action.type The type of Redux action * @param {object} action.updates The updates to make to the state * @param {string} type The type of add/remove from the fetching array * * @returns {object} The state updated with the new information */ function update(
const {userQueue, totalUserQueue, context, unsubscribe, liking, deleting, failed, fetching} = state; const add: boolean = typeof action.type === 'string' && action.type.includes('REQUEST'); const haveError: boolean = typeof action.type === 'string' && action.type.includes('FAILURE'); const uniqueQueue = action.queue && userQueue ? getUnique([...userQueue, ...action.queue], 'id') : []; const newQueue = action.queue && userQueue ? uniqueQueue.sort((a, b) => { if ( typeof a.seconds === 'number' && typeof a.nanoseconds === 'number' && typeof b.seconds === 'number' && typeof b.nanoseconds === 'number' ) { const {seconds: secA, nanoseconds: nanA} = a; const {seconds: secB, nanoseconds: nanB} = b; return secA < secB ? -1 : secA > secB ? 1 : nanA < nanB ? -1 : nanA > nanB ? 1 : 0; } else { return 0; } }) : userQueue ? [...userQueue] : []; const updates: State = ( context && Array.isArray(userQueue) && Array.isArray(liking) && Array.isArray(deleting) && Array.isArray(failed) && Array.isArray(fetching) && typeof totalUserQueue === 'number' && action.type ) ? { ...(action.updates ? action.updates : {}), lastUpdated, fetching: add && type ? fetching.concat(type) : type ? fetching.filter(t => t !== type) : fetching, error: haveError ? action.error : null, userQueue: action.type === 'REMOVE_QUEUE_TRACK' && typeof action.queueID === 'string' ? userQueue.filter(o => o.id !== action.queueID) : [...newQueue], totalUserQueue: action.type === 'REMOVE_QUEUE_TRACK' ? totalUserQueue - 1 : newQueue.length, liking: type === 'toggle' && add && action.queueID ? liking.concat(action.queueID) : action.type.includes('TOGGLE_TRACK_LIKE') && action.queueID ? liking.filter(id => id !== action.queueID) : [...liking], deleting: type === 'delete' && add && action.queueID ? deleting.concat(action.queueID) : action.type.includes('DELETE_QUEUE') && action.queueID ? deleting.filter(id => id !== action.queueID) : [...deleting], failed: (type === 'toggle' || type === 'delete') && haveError && action.queueID ? failed.concat(action.queueID) : (type === 'toggle' || type === 'delete') && typeof action.queueID === 'string' ? failed.filter(id => id !== action.queueID) : [...failed], unsubscribe: action.type === 'STOP_QUEUE_LISTENER_SUCCESS' ? null : typeof action.unsubscribe === 'function' ? action.unsubscribe : unsubscribe, context: action.updates && action.updates.context ? 
updateObject(context, action.updates.context) : {...context}, } : {}; return updateObject(state, updates); } export default function reducer( state: State = initialState, action: Action = {}, ): State { if (typeof action.type === 'string') { switch (action.type) { case types.DELETE_QUEUE_TRACK_REQUEST: case types.DELETE_QUEUE_TRACK_SUCCESS: case types.DELETE_QUEUE_TRACK_FAILURE: return update(state, action, 'delete'); case types.GET_USER_QUEUE_REQUEST: case types.GET_USER_QUEUE_FAILURE: return update(state, action, 'queue'); case types.QUEUE_TRACK_REQUEST: return updateObject(state, {queueing: true, error: null}); case types.QUEUE_TRACK_SUCCESS: return updateObject(state, {queueing: false, error: null}); case types.QUEUE_TRACK_FAILURE: return updateObject(state, {error: action.error, queueing: false}); case types.RESET_QUEUE: return initialState; case types.STOP_QUEUE_LISTENER_REQUEST: return state; case types.STOP_QUEUE_LISTENER_SUCCESS: case types.STOP_QUEUE_LISTENER_FAILURE: return update(state, action, 'queue'); case types.TOGGLE_TRACK_LIKE_REQUEST: case types.TOGGLE_TRACK_LIKE_SUCCESS: case types.TOGGLE_TRACK_LIKE_FAILURE: return update(state, action, 'toggle'); case types.GET_USER_QUEUE_SUCCESS: case types.REMOVE_QUEUE_TRACK: case types.UPDATE_QUEUE: return update(state, action); default: return state; } }; return state; }
state: State, action: Action, type?: string, ): State {
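Note: the reducer above depends on two helpers whose sources are not included here, updateObject from ../utils/updateObject and getUnique from ../utils/getUnique. Minimal sketches under the assumption that they are a shallow merge and a dedupe-by-key, which is how the reducer uses them; the real implementations may differ:

// Assumed behavior of the two imported helpers.
export const updateObject = <T extends object>(oldObject: T, newValues: Partial<T>): T => ({
  ...oldObject,
  ...newValues,
})

export const getUnique = <T extends Record<string, any>>(items: T[], key: string): T[] => {
  const seen = new Set<string>()
  return items.filter(item => {
    const k = String(item[key])
    if (seen.has(k)) return false
    seen.add(k)
    return true
  })
}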
receive_message_stream.py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import asyncio from ._test_base import _ReceiveTest class
(_ReceiveTest): def run_sync(self): count = 0 if self.args.peeklock: for msg in self.receiver: if count >= self.args.num_messages: break count += 1 msg.complete() else: for msg in self.receiver: if count >= self.args.num_messages: break count += 1 async def run_async(self): count = 0 if self.args.peeklock: async for msg in self.async_receiver: if count >= self.args.num_messages: break count += 1 await msg.complete() else: async for msg in self.async_receiver: if count >= self.args.num_messages: break count += 1
LegacyReceiveMessageStreamTest
userConfigReader.ts
import signale from 'signale' import { join } from 'path' import { cwd } from '@/src/core/env' export interface IConfig { [index: string]: any } export function
(): IConfig { try { // tslint:disable-next-line: non-literal-require const mod = require(join(cwd(), '.bootrc.ts')) return mod.default || mod } catch (error) { signale.warn('.bootrc.ts not found, preset config used') return {} } }
getRawUserConfig
defaults_test.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1beta1_test import ( "reflect" "testing" "k8s.io/api/scheduling/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/api/legacyscheme" apiv1 "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" // enforce that all types are installed _ "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/features" ) func roundTrip(t *testing.T, obj runtime.Object) runtime.Object
func TestSetDefaultPreempting(t *testing.T) { priorityClass := &v1beta1.PriorityClass{} // set NonPreemptingPriority true defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NonPreemptingPriority, true)() output := roundTrip(t, runtime.Object(priorityClass)).(*v1beta1.PriorityClass) if output.PreemptionPolicy == nil || *output.PreemptionPolicy != apiv1.PreemptLowerPriority { t.Errorf("Expected PriorityClass.Preempting value: %+v\ngot: %+v\n", apiv1.PreemptLowerPriority, output.PreemptionPolicy) } }
{ codec := legacyscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion) data, err := runtime.Encode(codec, obj) if err != nil { t.Errorf("%v\n %#v", err, obj) return nil } obj2, err := runtime.Decode(codec, data) if err != nil { t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj) return nil } obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object) err = legacyscheme.Scheme.Convert(obj2, obj3, nil) if err != nil { t.Errorf("%v\nSource: %#v", err, obj2) return nil } return obj3 }
tidebit.py
# -*- coding: utf-8 -*- # PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: # https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code from ccxt.base.exchange import Exchange from ccxt.base.errors import ExchangeError from ccxt.base.errors import InsufficientFunds from ccxt.base.errors import OrderNotFound class tidebit (Exchange): def describe(self): return self.deep_extend(super(tidebit, self).describe(), { 'id': 'tidebit', 'name': 'TideBit', 'countries': ['HK'], 'rateLimit': 1000, 'version': 'v2', 'has': { 'fetchDepositAddress': True, 'CORS': True, 'fetchTickers': True, 'fetchOHLCV': True, 'withdraw': True, }, 'timeframes': { '1m': '1', '5m': '5', '15m': '15', '30m': '30', '1h': '60', '2h': '120', '4h': '240', '12h': '720', '1d': '1440', '3d': '4320', '1w': '10080', }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/39034921-e3acf016-4480-11e8-9945-a6086a1082fe.jpg', 'api': 'https://www.tidebit.com', 'www': 'https://www.tidebit.com', 'doc': [ 'https://www.tidebit.com/documents/api/guide', 'https://www.tidebit.com/swagger/#/default', ], 'referral': 'http://bit.ly/2IX0LrM', }, 'api': { 'public': { 'get': [ 'markets', 'tickers', 'tickers/{market}', 'timestamp', 'trades', 'trades/{market}', 'order_book', 'order', 'k_with_pending_trades', 'k', 'depth', ], 'post': [], }, 'private': { 'get': [ 'addresses/{address}', 'deposits/history', 'deposits/get_deposit', 'deposits/deposit_address', 'historys/orders', 'historys/vouchers', 'historys/accounts', 'historys/snapshots', 'linkage/get_status', 'members/me', 'order', 'orders', 'partners/orders/{id}/trades', 'referral_commissions/get_undeposited', 'referral_commissions/get_graph_data', 'trades/my', 'withdraws/bind_account_list', 'withdraws/get_withdraw_account', 'withdraws/fetch_bind_info', ], 'post': [ 'deposits/deposit_cash', 'favorite_markets/update', 'order/delete', 'orders', 'orders/multi', 'orders/clear', 'referral_commissions/deposit', 'withdraws/apply', 'withdraws/bind_bank', 'withdraws/bind_address', ], }, }, 'fees': { 'trading': { 'tierBased': False, 'percentage': True, 'maker': 0.2 / 100, 'taker': 0.2 / 100, }, 'funding': { 'tierBased': False, 'percentage': True, 'withdraw': {}, # There is only 1% fee on withdrawals to your bank account. 
}, }, 'exceptions': { '2002': InsufficientFunds, '2003': OrderNotFound, }, }) def fetch_deposit_address(self, code, params={}): self.load_markets() currency = self.currency(code) request = { 'currency': currency['id'], } response = self.privateGetDepositAddress(self.extend(request, params)) if 'success' in response: if response['success']: address = self.safe_string(response, 'address') tag = self.safe_string(response, 'addressTag') return { 'currency': code, 'address': self.check_address(address), 'tag': tag, 'info': response, } def fetch_markets(self, params={}): response = self.publicGetMarkets(params) result = [] for i in range(0, len(response)): market = response[i] id = self.safe_string(market, 'id') symbol = self.safe_string(market, 'name') baseId, quoteId = symbol.split('/') base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) result.append({ 'id': id, 'symbol': symbol, 'base': base, 'quote': quote, 'baseId': baseId, 'quoteId': quoteId, 'info': market, }) return result def fetch_balance(self, params={}): self.load_markets() response = self.privateGetMembersMe(params) balances = self.safe_value(response, 'accounts') result = {'info': balances} for i in range(0, len(balances)): balance = balances[i] currencyId = self.safe_string(balance, 'currency') code = self.safe_currency_code(currencyId) account = self.account() account['free'] = self.safe_float(balance, 'balance') account['used'] = self.safe_float(balance, 'locked') result[code] = account return self.parse_balance(result) def fetch_order_book(self, symbol, limit=None, params={}): self.load_markets() market = self.market(symbol) request = { 'market': market['id'], } if limit is not None: request['limit'] = limit # default = 300 request['market'] = market['id'] response = self.publicGetDepth(self.extend(request, params)) timestamp = self.safe_timestamp(response, 'timestamp') return self.parse_order_book(response, timestamp) def parse_ticker(self, ticker, market=None): timestamp = self.safe_timestamp(ticker, 'at') ticker = self.safe_value(ticker, 'ticker', {}) symbol = None if market is not None: symbol = market['symbol'] last = self.safe_float(ticker, 'last') return { 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'high': self.safe_float(ticker, 'high'), 'low': self.safe_float(ticker, 'low'), 'bid': self.safe_float(ticker, 'buy'), 'ask': self.safe_float(ticker, 'sell'), 'bidVolume': None, 'askVolume': None, 'vwap': None, 'open': None, 'close': last, 'last': last, 'change': None, 'percentage': None, 'previousClose': None, 'average': None, 'baseVolume': self.safe_float(ticker, 'vol'), 'quoteVolume': None, 'info': ticker, } def fetch_tickers(self, symbols=None, params={}): self.load_markets() tickers = self.publicGetTickers(params) ids = list(tickers.keys()) result = {} for i in range(0, len(ids)): id = ids[i] market = None symbol = id if id in self.markets_by_id: market = self.markets_by_id[id] symbol = market['symbol'] else: baseId = id[0:3] quoteId = id[3:6] base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) symbol = base + '/' + quote ticker = tickers[id] result[symbol] = self.parse_ticker(ticker, market) return result def fetch_ticker(self, symbol, params={}): self.load_markets() market = self.market(symbol) request = { 'market': market['id'], } response = self.publicGetTickersMarket(self.extend(request, params)) return self.parse_ticker(response, market) def parse_trade(self, trade, market=None): timestamp = 
self.parse8601(self.safe_string(trade, 'created_at')) id = self.safe_string(trade, 'id') price = self.safe_float(trade, 'price') amount = self.safe_float(trade, 'volume') cost = self.safe_float(trade, 'funds') symbol = None if market is not None: symbol = market['symbol'] return { 'id': id, 'info': trade, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'symbol': symbol, 'type': None, 'side': None, 'order': None, 'takerOrMaker': None, 'price': price, 'amount': amount, 'cost': cost, 'fee': None, } def fetch_trades(self, symbol, since=None, limit=None, params={}): self.load_markets() market = self.market(symbol) request = { 'market': market['id'], } response = self.publicGetTrades(self.extend(request, params)) return self.parse_trades(response, market, since, limit) def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None): return [ ohlcv[0] * 1000, ohlcv[1], ohlcv[2], ohlcv[3], ohlcv[4], ohlcv[5], ] def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): self.load_markets() market = self.market(symbol) if limit is None: limit = 30 # default is 30 request = { 'market': market['id'], 'period': self.timeframes[timeframe], 'limit': limit, } if since is not None: request['timestamp'] = int(since / 1000) else: request['timestamp'] = 1800000 response = self.publicGetK(self.extend(request, params)) if response == 'null': return [] return self.parse_ohlcvs(response, market, timeframe, since, limit) def parse_order_status(self, status):
def parse_order(self, order, market=None): symbol = None if market is not None: symbol = market['symbol'] else: marketId = order['market'] symbol = self.markets_by_id[marketId]['symbol'] timestamp = self.parse8601(self.safe_string(order, 'created_at')) status = self.parse_order_status(self.safe_string(order, 'state')) id = self.safe_string(order, 'id') type = self.safe_string(order, 'ord_type') side = self.safe_string(order, 'side') price = self.safe_float(order, 'price') amount = self.safe_float(order, 'volume') filled = self.safe_float(order, 'executed_volume') remaining = self.safe_float(order, 'remaining_volume') cost = None if price is not None: if filled is not None: cost = price * filled return { 'id': id, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'lastTradeTimestamp': None, 'status': status, 'symbol': symbol, 'type': type, 'side': side, 'price': price, 'amount': amount, 'filled': filled, 'remaining': remaining, 'cost': cost, 'trades': None, 'fee': None, 'info': order, } def create_order(self, symbol, type, side, amount, price=None, params={}): self.load_markets() request = { 'market': self.market_id(symbol), 'side': side, 'volume': str(amount), 'ord_type': type, } if type == 'limit': request['price'] = str(price) response = self.privatePostOrders(self.extend(request, params)) market = self.markets_by_id[response['market']] return self.parse_order(response, market) def cancel_order(self, id, symbol=None, params={}): self.load_markets() request = { 'id': id, } result = self.privatePostOrderDelete(self.extend(request, params)) order = self.parse_order(result) status = self.safe_string(order, 'status') if status == 'closed' or status == 'canceled': raise OrderNotFound(self.id + ' ' + self.json(order)) return order def withdraw(self, code, amount, address, tag=None, params={}): self.check_address(address) self.load_markets() currency = self.currency(code) id = self.safe_string(params, 'id') if id is None: raise ExchangeError(self.id + ' withdraw() requires an extra `id` param(withdraw account id according to withdraws/bind_account_list endpoint') request = { 'id': id, 'currency_type': 'coin', # or 'cash' 'currency': currency['id'], 'body': amount, # 'address': address, # they don't allow withdrawing to direct addresses? } if tag is not None: request['memo'] = tag result = self.privatePostWithdrawsApply(self.extend(request, params)) return { 'info': result, 'id': None, } def nonce(self): return self.milliseconds() def encode_params(self, params): return self.urlencode(self.keysort(params)) def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): request = '/' + 'api/' + self.version + '/' + self.implode_params(path, params) + '.json' query = self.omit(params, self.extract_params(path)) url = self.urls['api'] + request if api == 'public': if query: url += '?' + self.urlencode(query) else: self.check_required_credentials() nonce = str(self.nonce()) sortedByKey = self.keysort(self.extend({ 'access_key': self.apiKey, 'tonce': nonce, }, params)) query = self.urlencode(sortedByKey) payload = method + '|' + request + '|' + query signature = self.hmac(self.encode(payload), self.encode(self.secret)) suffix = query + '&signature=' + signature if method == 'GET': url += '?' 
+ suffix else: body = suffix headers = {'Content-Type': 'application/x-www-form-urlencoded'} return {'url': url, 'method': method, 'body': body, 'headers': headers} def handle_errors(self, code, reason, url, method, headers, body, response): if code == 400: error = self.safe_value(response, 'error') errorCode = self.safe_string(error, 'code') feedback = self.id + ' ' + self.json(response) exceptions = self.exceptions if errorCode in exceptions: raise exceptions[errorCode](feedback) # fallback to default error handler
statuses = { 'done': 'closed', 'wait': 'open', 'cancel': 'canceled', } return self.safe_string(statuses, status, status)
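The sign() method above authenticates private calls by HMAC-ing the string method + '|' + request + '|' + query, where query is the key-sorted urlencoded parameters; ccxt's hmac helper defaults to SHA-256 with hex output. The same scheme as a standalone TypeScript sketch using Node's crypto (the endpoint and values in the usage comment are illustrative):

import {createHmac} from 'crypto'

// HMAC-SHA256 over "METHOD|path|sorted query string", hex-encoded,
// mirroring tidebit's sign() above. Assumes `sortedQuery` is already
// sorted by key, as keysort() guarantees.
const signRequest = (method: string, path: string, sortedQuery: string, secret: string): string =>
  createHmac('sha256', secret).update(`${method}|${path}|${sortedQuery}`).digest('hex')

// e.g. signRequest('GET', '/api/v2/members/me.json', 'access_key=KEY&tonce=1700000000000', 'SECRET')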
main.go
package lister import ( "github.com/trek10inc/awsets/context" "github.com/trek10inc/awsets/resource" ) var listers = make([]Lister, 0) type Lister interface { Types() []resource.ResourceType List(ctx context.AWSetsCtx) (*resource.Group, error) } func AllListers() []Lister { return listers } func Paginator(f func(*string) (*string, error)) error { var nt *string for { t, err := f(nt) if err != nil
if t == nil { break } nt = t } return nil }
{ return err }
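Paginator above is a generic token-driven pagination loop: call f with the previous continuation token until f stops returning one (or errors). The same pattern in TypeScript, for comparison; the names are illustrative:

// Keep calling `page` with the last continuation token until it
// returns none or throws.
async function paginate(page: (token?: string) => Promise<string | undefined>): Promise<void> {
  let token: string | undefined
  for (;;) {
    const next = await page(token)
    if (next === undefined) {
      break
    }
    token = next
  }
}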
minroot.rs
extern crate belllady as bellman; extern crate rand; extern crate ff; use ff::{Field,ScalarEngine}; // For randomness (during paramgen and proof generation) use self::rand::{thread_rng, Rng}; // We'll use these interfaces to construct our circuit. use bellman::{ Circuit, ConstraintSystem, SynthesisError, bls::{Bls12,Engine}, }; use bellman::groth16::{ self, create_random_proof_batch, generate_random_parameters, prepare_verifying_key, verify_proof, Proof, create_random_proof, }; use std::time::{Duration,Instant}; pub const MINROOT_ROUNDS: usize = 10000; // pub fn minroot<E: Engine>(mut xl: E::Fr, mut xr: E::Fr) -> (E::Fr, E::Fr) { // println!(" x1 = {:?}, y1 = {:?}", xl, xr); for _ in 0..MINROOT_ROUNDS { let mut tmp1 = xl;
tmp1.add_assign(&xr); // power equals (2 * p - 1) / 5. Don't delete this, was very hard to figure out. let tmp2 = tmp1.pow([ 0x33333332CCCCCCCD, 0x217F0E679998F199, 0xE14A56699D73F002, 0x2E5F0FBADD72321C, ]); xr = xl; xl = tmp2; } // println!("x3 = {:?}, y3 = {:?}", xl, xr); (xl,xr) } fn fifth_root<E: Engine>(x: E::Fr) -> Option<E::Fr> { Some(x.pow([ 0x33333332CCCCCCCD, 0x217F0E679998F199, 0xE14A56699D73F002, 0x2E5F0FBADD72321C,])) } // proving that I know x1, y1 such that x2^5 == (x1 + y1) #[derive(Clone)] pub struct MinRoot<E: Engine> { pub xl: Option<E::Fr>, pub xr: Option<E::Fr>, } /// Our circuit implements this `Circuit` trait which /// is used during paramgen and proving in order to /// synthesize the constraint system. impl<E: Engine> Circuit<E> for MinRoot<E> { fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> { // Allocate the first component of the preimage. let mut xl_value = self.xl; let mut xl = cs.alloc_input( || "preimage xl", || xl_value.ok_or(SynthesisError::AssignmentMissing), )?; // Allocate the second component of the preimage. let mut xr_value = self.xr; let mut xr = cs.alloc_input( || "preimage xr", || xr_value.ok_or(SynthesisError::AssignmentMissing), )?; for i in 0..MINROOT_ROUNDS { // xL, xR := (xL + xR)^(1/5), xL let cs = &mut cs.namespace(|| format!("round {}", i)); // power equals (2 * p - 1) / 5. let mut new_xl_value = None; if xl_value.is_none() { } else { let mut tmp1 = xl_value.unwrap(); tmp1.add_assign( &xr_value.unwrap()); new_xl_value = fifth_root::<E>(tmp1); } let new_xl = if i == (MINROOT_ROUNDS - 1) { // This is the last round, xL is our image and so // we allocate a public input. cs.alloc_input( || "image_xl", || new_xl_value.ok_or(SynthesisError::AssignmentMissing), )? } else { cs.alloc( || "new_xl", || new_xl_value.ok_or(SynthesisError::AssignmentMissing), )? }; // tmp2 = (xl_(i+1))^2 let tmp2 = new_xl_value.map(|mut e| { e.square(); e }); // tmp3 = (xl_(i+1))^4 let tmp3 = tmp2.map(|mut e| { e.square(); e }); let tmp2 = cs.alloc( || "tmp2", || tmp2.ok_or(SynthesisError::AssignmentMissing), )?; let tmp3 = cs.alloc( || "tmp3", || tmp3.ok_or(SynthesisError::AssignmentMissing), )?; let new_xr = if i == (MINROOT_ROUNDS - 1) { // This is the last round, xR is our image and so // we allocate a public input. cs.alloc_input( || "image_xr", || xl_value.ok_or(SynthesisError::AssignmentMissing), )? } else { cs.alloc( || "new_xr", || xl_value.ok_or(SynthesisError::AssignmentMissing), )? }; // enforce that tmp2 = new_xl^2 cs.enforce( || "tmp2 = new_xl^2", |lc| lc + new_xl, |lc| lc + new_xl, |lc| lc + tmp2, ); // enforce that tmp3 = tmp2^2 cs.enforce( || "tmp3 = tmp2^2", |lc| lc + tmp2, |lc| lc + tmp2, |lc| lc + tmp3, ); // tmp3 * new_xl = new_xl^5 = xl + xr cs.enforce( || "new_xL^5 = xl + xr", |lc| lc + tmp3, |lc| lc + new_xl, |lc| lc + xl + xr, ); // think this constraint isn't actually necessary because can just take in same witness wire. 
// new_xr = xl // cs.enforce( // || "new_xr = xl", // |lc| lc + new_xr, // |lc| lc + CS::one(), // |lc| lc + xl, // ); // update xl and xr for next round xr = new_xr; xr_value = xl_value; xl = new_xl; xl_value = new_xl_value; } Ok(()) } } // #[test] fn _minroot_test() { fil_logger::init(); let rng = &mut rand_core::OsRng; println!("Creating parameters..."); // Create parameters for our circuit let params = { let c = MinRoot::<Bls12> { xl: None, xr: None, }; generate_random_parameters(c, rng).unwrap() }; // Prepare the verification key (for proof verification) let pvk = prepare_verifying_key(&params.vk); println!("Creating proofs..."); // Let's benchmark stuff! const SAMPLES: u32 = 50; let mut total_proving = Duration::new(0, 0); let mut total_verifying = Duration::new(0, 0); let mut proof_vec = vec![]; let mut proofs = vec![]; let mut images = vec![]; for _ in 0..SAMPLES { // Generate a random preimage and compute the image let xl = <Bls12 as ScalarEngine>::Fr::random(rng); let xr = <Bls12 as ScalarEngine>::Fr::random(rng); let (image_xl, image_xr) = minroot::<Bls12>(xl, xr); proof_vec.truncate(0); let start = Instant::now(); { // Create an instance of our circuit (with the // witness) let c = MinRoot::<Bls12> { xl: Some(xl), xr: Some(xr), }; // Create a groth16 proof with our parameters. let proof = create_random_proof(c, &params, rng).unwrap(); proof.write(&mut proof_vec).unwrap(); } total_proving += start.elapsed(); let start = Instant::now(); let proof = Proof::read(&proof_vec[..]).unwrap(); // Check the proof assert!(verify_proof(&pvk, &proof, &[xl, xr, image_xl, image_xr]).unwrap()); total_verifying += start.elapsed(); proofs.push(proof); images.push(vec![xl, xr, image_xl, image_xr]); } let proving_avg = total_proving / SAMPLES; let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (proving_avg.as_secs() as f64); let verifying_avg = total_verifying / SAMPLES; let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (verifying_avg.as_secs() as f64); println!("Average proving time: {:08}s", proving_avg); println!("Average verifying time: {:08}s", verifying_avg); }
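The exponent that the comment in fifth_root warns against deleting can be checked directly. Over the BLS12-381 scalar field F_r, the map x -> x^5 is a bijection because gcd(5, r-1) = 1, and its inverse is x -> x^d for the d satisfying

    5d ≡ 1 (mod r-1),    d = (2(r-1) + 1) / 5 = (2r - 1) / 5.

That d is an integer exactly when 2r ≡ 1 (mod 5), i.e. r ≡ 3 (mod 5), which holds for the BLS12-381 scalar modulus (its decimal form ends in 3). The four u64 constants hard-coded in the circuit are just this (2r - 1)/5 written as little-endian 64-bit limbs.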
prsstat.rs
#[doc = "Reader of register PRSSTAT"] pub type R = crate::R<u32, super::PRSSTAT>; #[doc = "Reader of field `CMDINHIBITCMD`"] pub type CMDINHIBITCMD_R = crate::R<bool, bool>; #[doc = "Reader of field `CMDINHIBITDAT`"] pub type CMDINHIBITDAT_R = crate::R<bool, bool>; #[doc = "Reader of field `DATLINEACTIVE`"] pub type DATLINEACTIVE_R = crate::R<bool, bool>; #[doc = "Reader of field `RETUNINGREQ`"] pub type RETUNINGREQ_R = crate::R<bool, bool>; #[doc = "Reader of field `WRTRANACT`"] pub type WRTRANACT_R = crate::R<bool, bool>; #[doc = "Reader of field `RDTRANACT`"] pub type RDTRANACT_R = crate::R<bool, bool>; #[doc = "Reader of field `BUFFERWRITEENABLE`"] pub type BUFFERWRITEENABLE_R = crate::R<bool, bool>; #[doc = "Reader of field `BUFRDEN`"] pub type BUFRDEN_R = crate::R<bool, bool>; #[doc = "Reader of field `CARDINS`"] pub type CARDINS_R = crate::R<bool, bool>; #[doc = "Reader of field `CARDSTATESTABLE`"] pub type CARDSTATESTABLE_R = crate::R<bool, bool>; #[doc = "Reader of field `CARDDETPINLVL`"] pub type CARDDETPINLVL_R = crate::R<bool, bool>; #[doc = "Reader of field `WRPROTSWPINLVL`"] pub type WRPROTSWPINLVL_R = crate::R<bool, bool>; #[doc = "Reader of field `DAT3TO0SIGLVL`"] pub type DAT3TO0SIGLVL_R = crate::R<u8, u8>; #[doc = "Reader of field `CMDSIGLVL`"] pub type CMDSIGLVL_R = crate::R<bool, bool>; #[doc = "Reader of field `DAT7TO4SIGLVL`"] pub type DAT7TO4SIGLVL_R = crate::R<u8, u8>; impl R { #[doc = "Bit 0 - Command Inhibit (CMD)"] #[inline(always)] pub fn cmdinhibitcmd(&self) -> CMDINHIBITCMD_R { CMDINHIBITCMD_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Command Inhibit (DAT)"] #[inline(always)] pub fn cmdinhibitdat(&self) -> CMDINHIBITDAT_R { CMDINHIBITDAT_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - DAT Line Active"] #[inline(always)] pub fn datlineactive(&self) -> DATLINEACTIVE_R { DATLINEACTIVE_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Re-Tuning Request"] #[inline(always)] pub fn retuningreq(&self) -> RETUNINGREQ_R { RETUNINGREQ_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 8 - Write Transfer Active"] #[inline(always)] pub fn wrtranact(&self) -> WRTRANACT_R { WRTRANACT_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 9 - Read Transfer Active"] #[inline(always)] pub fn rdtranact(&self) -> RDTRANACT_R { RDTRANACT_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 10 - Buffer Write Enable"] #[inline(always)] pub fn bufferwriteenable(&self) -> BUFFERWRITEENABLE_R { BUFFERWRITEENABLE_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 11 - Buffer Read Enable"] #[inline(always)] pub fn bufrden(&self) -> BUFRDEN_R { BUFRDEN_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 16 - Card Inserted Status"] #[inline(always)] pub fn cardins(&self) -> CARDINS_R { CARDINS_R::new(((self.bits >> 16) & 0x01) != 0) } #[doc = "Bit 17 - Card State Stable Status"] #[inline(always)] pub fn
(&self) -> CARDSTATESTABLE_R { CARDSTATESTABLE_R::new(((self.bits >> 17) & 0x01) != 0) } #[doc = "Bit 18 - Card Detect Pin Level"] #[inline(always)] pub fn carddetpinlvl(&self) -> CARDDETPINLVL_R { CARDDETPINLVL_R::new(((self.bits >> 18) & 0x01) != 0) } #[doc = "Bit 19 - Write Protect Switch Pin Level"] #[inline(always)] pub fn wrprotswpinlvl(&self) -> WRPROTSWPINLVL_R { WRPROTSWPINLVL_R::new(((self.bits >> 19) & 0x01) != 0) } #[doc = "Bits 20:23 - DAT\\[3:0\\] Line Signal Level"] #[inline(always)] pub fn dat3to0siglvl(&self) -> DAT3TO0SIGLVL_R { DAT3TO0SIGLVL_R::new(((self.bits >> 20) & 0x0f) as u8) } #[doc = "Bit 24 - Command Line Signal Level"] #[inline(always)] pub fn cmdsiglvl(&self) -> CMDSIGLVL_R { CMDSIGLVL_R::new(((self.bits >> 24) & 0x01) != 0) } #[doc = "Bits 25:28 - DAT\\[7:4\\] Line Signal Level"] #[inline(always)] pub fn dat7to4siglvl(&self) -> DAT7TO4SIGLVL_R { DAT7TO4SIGLVL_R::new(((self.bits >> 25) & 0x0f) as u8) } }
cardstatestable
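A hedged usage sketch for the generated reader above; the peripheral path is hypothetical, since the PAC crate and peripheral name are not shown.

// Hypothetical usage (svd2rust-style PAC assumed; `pac::SDHC` is a placeholder).
fn card_ready() -> bool {
    let r = unsafe { &*pac::SDHC::ptr() }.prsstat.read();
    let _dat_levels: u8 = r.dat3to0siglvl().bits(); // raw DAT[3:0] line levels
    // Card inserted, card state stable, and the CMD line free to issue a command:
    r.cardins().bit_is_set() && r.cardstatestable().bit_is_set() && r.cmdinhibitcmd().bit_is_clear()
}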
CommentInputField.tsx
import { FormEvent, useCallback, useEffect, useState } from "react"; export const CommentInputField: React.VFC<{ placeholder?: string; defaultValue?: string; }> = ({ placeholder, defaultValue }) => { const [value, setValue] = useState(defaultValue ?? ""); useEffect(() => { setValue(defaultValue ?? ""); }, [defaultValue]); const onChange = useCallback((e: FormEvent<HTMLInputElement>) => { setValue(e.currentTarget.value); }, []); return ( <div className="w-full md:w-1/3 py-1 px-2 mb-6 md:mb-0"> <label className="block tracking-wide text-gray-700 text-xs font-bold mb-2" htmlFor="comment" > Comment for this changeset (* required) </label> <input className="appearance-none block w-full leading-tight rounded py-2 px-1 border border-gray-300 bg-gray-100 text-black placeholder-gray-500 placeholder-opacity-50 focus:outline-none focus:bg-white focus:border-gray-500" id="comment" name="comment" type="text" placeholder={placeholder} value={value} required={true} onChange={onChange} /> </div>
); };
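A short usage sketch for the component above; the wrapping form, handler, and import path are assumptions for illustration only.

// Hypothetical consumer: the form and submit handler are not part of the original file.
import { CommentInputField } from "./CommentInputField";

export const ChangesetCommentForm: React.VFC = () => (
  <form onSubmit={(e) => e.preventDefault()}>
    <CommentInputField placeholder="Summarize this changeset" defaultValue="" />
  </form>
);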
index.ts
export { default as alertBehavior } from './Alert/alertBehavior'; export * from './Alert/alertBehavior'; export { default as alertWarningBehavior } from './Alert/alertWarningBehavior'; export { default as alertBaseBehavior } from './Alert/alertBaseBehavior'; export * from './Attachment/attachmentBehavior'; export { default as attachmentBehavior } from './Attachment/attachmentBehavior'; export { default as buttonBehavior } from './Button/buttonBehavior'; export * from './Button/buttonBehavior'; export { default as toggleButtonBehavior } from './Button/toggleButtonBehavior'; export * from './Image/imageBehavior'; export { default as imageBehavior } from './Image/imageBehavior'; export { default as menuBehavior } from './Menu/menuBehavior'; export { default as menuItemBehavior } from './Menu/menuItemBehavior'; export { default as menuDividerBehavior } from './Menu/menuDividerBehavior'; export { default as submenuBehavior } from './Menu/submenuBehavior'; export { default as basicListBehavior } from './List/listBehavior'; export { default as basicListItemBehavior } from './List/basicListItemBehavior'; export { default as listBehavior } from './List/listBehavior'; export * from './List/listBehavior'; export { default as listItemBehavior } from './List/listItemBehavior'; export * from './List/listItemBehavior'; export { default as navigableListBehavior } from './List/navigableListBehavior'; export { default as navigableListItemBehavior } from './List/navigableListItemBehavior'; export { default as selectableListBehavior } from './List/selectableListBehavior'; export { default as selectableListItemBehavior } from './List/selectableListItemBehavior'; export { default as loaderBehavior } from './Loader/loaderBehavior'; export { default as inputBehavior } from './Input/inputBehavior'; export { default as iconBehavior } from './Icon/iconBehavior'; export * from './Icon/iconBehavior'; export { default as indicatorBehavior } from './Box/indicatorBehavior'; export * from './Box/indicatorBehavior'; export { default as tabBehavior } from './Tab/tabBehavior'; export { default as tabListBehavior } from './Tab/tabListBehavior'; export { default as menuAsToolbarBehavior } from './Toolbar/menuAsToolbarBehavior'; export * from './Toolbar/toolbarMenuBehavior'; export { default as toolbarMenuBehavior } from './Toolbar/toolbarMenuBehavior'; export { default as toolbarMenuItemBehavior } from './Toolbar/toolbarMenuItemBehavior'; export * from './Toolbar/toolbarMenuItemBehavior'; export { default as menuItemAsToolbarButtonBehavior } from './Toolbar/menuItemAsToolbarButtonBehavior'; export { default as toolbarBehavior } from './Toolbar/toolbarBehavior'; export { default as toolbarItemBehavior } from './Toolbar/toolbarItemBehavior'; export * from './Toolbar/toolbarItemBehavior'; export { default as toolbarMenuItemCheckboxBehavior } from './Toolbar/toolbarMenuItemCheckboxBehavior'; export { default as toolbarMenuItemRadioBehavior } from './Toolbar/toolbarMenuItemRadioBehavior'; export * from './Toolbar/toolbarMenuRadioGroupBehavior'; export { default as toolbarMenuRadioGroupBehavior } from './Toolbar/toolbarMenuRadioGroupBehavior'; export * from './Toolbar/toolbarRadioGroupBehavior'; export { default as toolbarRadioGroupBehavior } from './Toolbar/toolbarRadioGroupBehavior'; export { default as toolbarRadioGroupItemBehavior } from './Toolbar/toolbarRadioGroupItemBehavior'; export { default as radioGroupBehavior } from './Radio/radioGroupBehavior'; export { default as radioGroupItemBehavior } from 
'./Radio/radioGroupItemBehavior'; export * from './Popup/popupBehavior'; export { default as popupBehavior } from './Popup/popupBehavior'; export { default as chatBehavior } from './Chat/chatBehavior'; export * from './Chat/chatBehavior'; export { default as chatMessageBehavior } from './Chat/chatMessageBehavior'; export * from './Chat/chatMessageBehavior'; export { default as gridBehavior } from './Grid/gridBehavior'; export { default as gridHorizontalBehavior } from './Grid/gridHorizontalBehavior'; export { default as hierarchicalTreeBehavior } from './HierarchicalTree/hierarchicalTreeBehavior'; export { default as hierarchicalTreeItemBehavior } from './HierarchicalTree/hierarchicalTreeItemBehavior'; export { default as hierarchicalTreeTitleBehavior } from './HierarchicalTree/hierarchicalTreeTitleBehavior'; export { default as hierarchicalSubtreeBehavior } from './HierarchicalTree/hierarchicalSubtreeBehavior'; export { default as dialogBehavior } from './Dialog/dialogBehavior'; export { default as statusBehavior } from './Status/statusBehavior'; export { default as embedBehavior } from './Embed/embedBehavior'; export { default as accordionBehavior } from './Accordion/accordionBehavior'; export { default as accordionTitleBehavior } from './Accordion/accordionTitleBehavior';
export { default as checkboxBehavior } from './Checkbox/checkboxBehavior'; export * from './Checkbox/checkboxBehavior'; export * from './Tooltip/tooltipAsDescriptionBehavior'; export { default as tooltipAsDescriptionBehavior } from './Tooltip/tooltipAsDescriptionBehavior'; export { default as tooltipAsLabelBehavior } from './Tooltip/tooltipAsLabelBehavior'; export { default as sliderBehavior } from './Slider/sliderBehavior'; export * from './Slider/sliderBehavior'; export { default as menuButtonBehavior } from './MenuButton/menuButtonBehavior'; export { default as splitButtonBehavior } from './SplitButton/splitButtonBehavior'; export * from './SplitButton/splitButtonBehavior'; export { default as treeBehavior } from './Tree/treeBehavior'; export * from './Tree/treeItemBehavior'; export { default as treeItemBehavior } from './Tree/treeItemBehavior'; export * from './Tree/treeTitleBehavior'; export { default as treeTitleBehavior } from './Tree/treeTitleBehavior'; export { default as textAreaBehavior } from './TextArea/textAreaBehavior'; export * from './TextArea/textAreaBehavior'; export { default as treeAsListBehavior } from './Tree/treeAsListBehavior'; export { default as treeItemAsListItemBehavior } from './Tree/treeItemAsListItemBehavior'; export { default as treeTitleAsListItemTitleBehavior } from './Tree/treeTitleAsListItemTitleBehavior'; export { default as carouselBehavior } from './Carousel/carouselBehavior'; export { default as carouselItemBehavior } from './Carousel/carouselItemBehavior'; export { default as tableBehavior } from './Table/tableBehavior'; export { default as tableRowBehavior } from './Table/tableRowBehavior'; export { default as tableCellBehavior } from './Table/tableCellBehavior'; export * from './Table/tableCellBehavior'; export { default as tableHeaderCellBehavior } from './Table/tableHeaderCellBehavior'; export { default as gridNestedBehavior } from './Table/gridNestedBehavior'; export { default as gridHeaderRowBehavior } from './Table/gridHeaderRowBehavior'; export { default as gridHeaderCellBehavior } from './Table/gridHeaderCellBehavior'; export { default as gridRowBehavior } from './Table/gridRowBehavior'; export * from './Table/gridRowBehavior'; export { default as gridRowNestedBehavior } from './Table/gridRowNestedBehavior'; export { default as gridCellBehavior } from './Table/gridCellBehavior'; export { default as gridCellMultipleFocusableBehavior } from './Table/gridCellMultipleFocusableBehavior'; export { default as gridCellWithFocusableElementBehavior } from './Table/gridCellWithFocusableElementBehavior'; export { default as cardBehavior } from './Card/cardBehavior'; export * from './Card/cardBehavior'; export { default as cardFocusableBehavior } from './Card/cardFocusableBehavior'; export { default as cardChildrenFocusableBehavior } from './Card/cardChildrenFocusableBehavior'; export { default as videoBehavior } from './Video/videoBehavior'; export * from './Video/videoBehavior';
export { default as accordionContentBehavior } from './Accordion/accordionContentBehavior';
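A hedged import sketch showing how this barrel is typically consumed; the module specifier is an assumption (the names match Fluent UI Northstar's accessibility behaviors, but the real package name is not shown here).

// Hypothetical consumer of the barrel above; the specifier is a placeholder.
import { buttonBehavior, menuBehavior, treeBehavior } from '@fluentui/accessibility';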
checker.py
import logging from collections import namedtuple from typing import (Any, Callable, Dict, # pylint: disable=unused-import Generator, Iterable, List, Optional, Text, Union, cast) import schema_salad.validate as validate from schema_salad.sourceline import SourceLine, bullets, strip_dup_lineno import six from .errors import WorkflowException from .loghandler import _logger from .process import shortname from .utils import json_dumps def _get_type(tp): # type: (Any) -> Any if isinstance(tp, dict): if tp.get("type") not in ("array", "record", "enum"): return tp["type"] return tp def check_types(srctype, sinktype, linkMerge, valueFrom): # type: (Any, Any, Optional[Text], Optional[Text]) -> Text """Check whether the source and sink types are compatible, returning "pass", "warning", or "exception". """ if valueFrom: return "pass" elif not linkMerge: if can_assign_src_to_sink(srctype, sinktype, strict=True): return "pass" elif can_assign_src_to_sink(srctype, sinktype, strict=False): return "warning" else:
elif linkMerge == "merge_nested": return check_types({"items": _get_type(srctype), "type": "array"}, _get_type(sinktype), None, None) elif linkMerge == "merge_flattened": return check_types(merge_flatten_type(_get_type(srctype)), _get_type(sinktype), None, None) else: raise WorkflowException(u"Unrecognized linkMerge enum '%s'" % linkMerge) def merge_flatten_type(src): # type: (Any) -> Any """Return the merge flattened type of the source type """ if isinstance(src, list): return [merge_flatten_type(t) for t in src] elif isinstance(src, dict) and src.get("type") == "array": return src else: return {"items": src, "type": "array"} def can_assign_src_to_sink(src, sink, strict=False): # type: (Any, Any, bool) -> bool """Check for identical type specifications, ignoring extra keys like inputBinding. src: admissible source types sink: admissible sink types In non-strict comparison, at least one source type must match one sink type. In strict comparison, all source types must match at least one sink type. """ if src == "Any" or sink == "Any": return True if isinstance(src, dict) and isinstance(sink, dict): if sink.get("not_connected") and strict: return False if src["type"] == "array" and sink["type"] == "array": return can_assign_src_to_sink(src["items"], sink["items"], strict) elif src["type"] == "record" and sink["type"] == "record": return _compare_records(src, sink, strict) elif src["type"] == "File" and sink["type"] == "File": for sinksf in sink.get("secondaryFiles", []): if not [1 for srcsf in src.get("secondaryFiles", []) if sinksf == srcsf]: if strict: return False return True else: return can_assign_src_to_sink(src["type"], sink["type"], strict) elif isinstance(src, list): if strict: for t in src: if not can_assign_src_to_sink(t, sink): return False return True else: for t in src: if can_assign_src_to_sink(t, sink): return True return False elif isinstance(sink, list): for t in sink: if can_assign_src_to_sink(src, t): return True return False else: return src == sink def _compare_records(src, sink, strict=False): # type: (Dict[Text, Any], Dict[Text, Any], bool) -> bool """Compare two records, ensuring they have compatible fields. This handles normalizing record names, which will be relative to workflow step, so that they can be compared. """ def _rec_fields(rec): # type: (Dict[Text, Any]) -> Dict[Text, Any] out = {} for field in rec["fields"]: name = shortname(field["name"]) out[name] = field["type"] return out srcfields = _rec_fields(src) sinkfields = _rec_fields(sink) for key in six.iterkeys(sinkfields): if (not can_assign_src_to_sink( srcfields.get(key, "null"), sinkfields.get(key, "null"), strict) and sinkfields.get(key) is not None): _logger.info("Record comparison failure for %s and %s\n" "Did not match fields for %s: %s and %s" % (src["name"], sink["name"], key, srcfields.get(key), sinkfields.get(key))) return False return True def static_checker(workflow_inputs, workflow_outputs, step_inputs, step_outputs, param_to_step): # type: (List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], Dict[Text, Dict[Text, Any]]) -> None """Check if all source and sink types of a workflow are compatible before run time. 
""" # source parameters: workflow_inputs and step_outputs # sink parameters: step_inputs and workflow_outputs # make a dictionary of source parameters, indexed by the "id" field src_parms = workflow_inputs + step_outputs src_dict = {} for parm in src_parms: src_dict[parm["id"]] = parm step_inputs_val = check_all_types(src_dict, step_inputs, "source") workflow_outputs_val = check_all_types(src_dict, workflow_outputs, "outputSource") warnings = step_inputs_val["warning"] + workflow_outputs_val["warning"] exceptions = step_inputs_val["exception"] + workflow_outputs_val["exception"] warning_msgs = [] exception_msgs = [] for warning in warnings: src = warning.src sink = warning.sink linkMerge = warning.linkMerge if sink.get("secondaryFiles") and sorted(sink.get("secondaryFiles",[])) != sorted(src.get("secondaryFiles",[])): msg1 = "Sink '%s'" % (shortname(sink["id"])) msg2 = SourceLine(sink.get("_tool_entry", sink), "secondaryFiles").makeError( "expects secondaryFiles: %s but" % (sink.get("secondaryFiles"))) if "secondaryFiles" in src: msg3 = SourceLine(src, "secondaryFiles").makeError( "source '%s' has secondaryFiles %s." % (shortname(src["id"]), src.get("secondaryFiles"))) else: msg3 = SourceLine(src, "id").makeError( "source '%s' does not include secondaryFiles." % (shortname(src["id"]))) msg4 = SourceLine(src, "id").makeError("To fix, add secondaryFiles: %s to definition of '%s'." % (sink.get("secondaryFiles"), shortname(src["id"]))) msg = SourceLine(sink).makeError("%s\n%s" % (msg1, bullets([msg2, msg3, msg4], " "))) elif sink.get("not_connected"): msg = SourceLine(sink, "type").makeError( "'%s' is not an input parameter of %s, expected %s" % (shortname(sink["id"]), param_to_step[sink["id"]]["run"], ", ".join(shortname(s["id"]) for s in param_to_step[sink["id"]]["inputs"] if not s.get("not_connected")))) else: msg = SourceLine(src, "type").makeError( "Source '%s' of type %s may be incompatible" % (shortname(src["id"]), json_dumps(src["type"]))) + "\n" + \ SourceLine(sink, "type").makeError( " with sink '%s' of type %s" % (shortname(sink["id"]), json_dumps(sink["type"]))) if linkMerge: msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge) warning_msgs.append(msg) for exception in exceptions: src = exception.src sink = exception.sink linkMerge = exception.linkMerge msg = SourceLine(src, "type").makeError( "Source '%s' of type %s is incompatible" % (shortname(src["id"]), json_dumps(src["type"]))) + "\n" + \ SourceLine(sink, "type").makeError( " with sink '%s' of type %s" % (shortname(sink["id"]), json_dumps(sink["type"]))) if linkMerge: msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge) exception_msgs.append(msg) for sink in step_inputs: if ('null' != sink["type"] and 'null' not in sink["type"] and "source" not in sink and "default" not in sink and "valueFrom" not in sink): msg = SourceLine(sink).makeError( "Required parameter '%s' does not have source, default, or valueFrom expression" % shortname(sink["id"])) exception_msgs.append(msg) all_warning_msg = strip_dup_lineno("\n".join(warning_msgs)) all_exception_msg = strip_dup_lineno("\n".join(exception_msgs)) if warnings: _logger.warning("Workflow checker warning:\n%s" % all_warning_msg) if exceptions: raise validate.ValidationException(all_exception_msg) SrcSink = namedtuple("SrcSink", ["src", "sink", "linkMerge"]) def check_all_types(src_dict, sinks, sourceField): # type: (Dict[Text, Any], List[Dict[Text, Any]], Text) -> Dict[Text, List[SrcSink]] # sourceField is either 
"soure" or "outputSource" """Given a list of sinks, check if their types match with the types of their sources. """ validation = {"warning": [], "exception": []} # type: Dict[Text, List[SrcSink]] for sink in sinks: if sourceField in sink: valueFrom = sink.get("valueFrom") if isinstance(sink[sourceField], list): srcs_of_sink = [src_dict[parm_id] for parm_id in sink[sourceField]] linkMerge = sink.get("linkMerge", ("merge_nested" if len(sink[sourceField]) > 1 else None)) else: parm_id = sink[sourceField] srcs_of_sink = [src_dict[parm_id]] linkMerge = None for src in srcs_of_sink: check_result = check_types(src, sink, linkMerge, valueFrom) if check_result == "warning": validation["warning"].append(SrcSink(src, sink, linkMerge)) elif check_result == "exception": validation["exception"].append(SrcSink(src, sink, linkMerge)) return validation
return "exception"
DeployClusters.js
/* Copyright 2015 Skippbox, Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ import AltContainer from 'alt-container'; import Colors, { defaultNavigatorStyle } from 'styles/Colors'; import ScrollView from 'components/commons/ScrollView'; import ListItem from 'components/commons/ListItem'; import ListHeader from 'components/commons/ListHeader'; import ChartsUtils from 'utils/ChartsUtils'; import NavigationUtils from 'utils/NavigationUtils'; import DeploymentsActions from 'actions/DeploymentsActions'; import ServicesActions from 'actions/ServicesActions'; import ClustersActions from 'actions/ClustersActions'; import BaseApi from 'api/BaseApi'; import PropTypes from 'prop-types'; const { View, Text, Image, StyleSheet, ActivityIndicator, Alert } = ReactNative; const MAX_RETRIES = 3; // number of retries to deploy, if it fails for unknown reason const styles = StyleSheet.create({ container: { flex: 1, backgroundColor: Colors.BACKGROUND, }, list: { flex: 1, }, content: { paddingTop: 20, }, chart: { backgroundColor: Colors.WHITE, borderColor: Colors.BORDER, borderBottomWidth: 1, flexDirection: 'row', alignItems: 'center', paddingVertical: 10, paddingHorizontal: 15, }, chartIcon: { width: 45, height: 45, alignSelf: 'center', }, chartTexts: { flex: 1, flexDirection: 'column', marginLeft: 8, }, chartTitle: { fontSize: 16, fontWeight: '600', backgroundColor: 'transparent', }, chartSubtitle: { marginTop: 2, fontSize: 12, }, loader: { position: 'absolute', top: 0, left: 0, bottom: 0, right: 0, backgroundColor: 'rgba(0, 0, 0, 0.3)', alignItems: 'center', justifyContent: 'center', }, loaderText: { color: Colors.WHITE, marginTop: 20, }, deployed: { flexDirection: 'column', marginTop: 20, }, deployedImage: { width: 50, height: 50, resizeMode: 'contain', tintColor: Colors.GREEN, marginBottom: 20, }, deployedTitle: { color: Colors.GRAY, fontSize: 20, textAlign: 'center', marginBottom: 20, paddingHorizontal: 10, }, }); export class
extends Component { static navigatorStyle = defaultNavigatorStyle; render() { return ( <AltContainer stores={{ clusters: () => { return { store: alt.stores.ClustersStore, value: alt.stores.ClustersStore.getClusters(), }; }, }} > <DeployClusters chart={this.props.chart} clusters={alt.stores.ClustersStore.getClusters()} navigator={this.props.navigator} /> </AltContainer> ); } } export default class DeployClusters extends Component { static propTypes = { chart: PropTypes.instanceOf(Immutable.Map).isRequired, clusters: PropTypes.instanceOf(Immutable.List).isRequired, }; constructor() { super(); this.state = { loading: false, deployed: false, selectedCluster: null, error: null, loadingMessage: '', }; this.deployTries = MAX_RETRIES; } componentWillUpdate(nextProps, nextState) { if (!nextState.loading && this.state.loadingMessage) { this.setState({ loadingMessage: '' }); } } render() { const { chart } = this.props; const { selectedCluster } = this.state; const file = chart.get('chartfile'); return ( <View style={styles.container}> <ScrollView style={styles.list} contentContainerStyle={styles.content}> <ListHeader title={intl('chart')} /> <View style={styles.chart}> <Image style={styles.chartIcon} source={ChartsUtils.iconForChart(file.get('name'))} /> <View style={styles.chartTexts}> <Text style={styles.chartTitle} numberOfLines={2}> {file.get('name')} </Text> <Text style={styles.chartSubtitle} numberOfLines={2}> {file.get('description')} </Text> </View> </View> {selectedCluster && [ <ListHeader key="selectedClusterHeader" title={intl('deploy_selected_cluster')} />, <ListItem key="selectedCluster" title={selectedCluster.get('name')} subtitle={selectedCluster.get('url')} isLast={true} />, ]} {this.renderClusters()} {this.renderDeploySuccess()} {this.renderDeployError()} </ScrollView> {this.state.loading && ( <View style={styles.loader}> <ActivityIndicator color={Colors.WHITE} size="large" /> <Text style={styles.loaderText}>{this.state.loadingMessage}</Text> </View> )} </View> ); } renderClusters() { if (this.state.deployed || this.state.selectedCluster) { return false; } const { clusters } = this.props; const items = clusters.map((cluster, i) => { return ( <ListItem key={i} title={cluster.get('name')} subtitle={cluster.get('url')} showArrow={true} onPress={() => this.chooseCluster(cluster)} isLast={i === clusters.size - 1} /> ); }); return [ <ListHeader key="title" title={intl('deploy_choose_cluster')} />, items, ]; } renderDeploySuccess() { if (!this.state.deployed) { return false; } return this.renderStatus({ title: intl('deploy_success_title'), actionTitle: intl('deploy_success_action'), action: () => this.openCluster(this.state.selectedCluster), image: require('images/done_circle.png'), tintColor: Colors.GREEN, }); } renderDeployError() { if (!this.state.error || this.state.deployed) { return false; } return [ this.renderStatus({ title: intl('deploy_error_title', { message: this.state.error.message, }), actionTitle: intl('deploy_error_action'), action: () => this.chooseCluster(this.state.selectedCluster), image: require('images/error_circle.png'), tintColor: Colors.RED, }), <ListItem key="action" title={intl('deploy_error_action_2')} showArrow={true} onPress={() => this.setState({ selectedCluster: null, error: null })} isLast={true} />, ]; } renderStatus({ title, actionTitle, action, image, tintColor }) { return ( <View key="status" style={styles.deployed}> <View style={{ alignItems: 'center' }}> <Image style={[styles.deployedImage, { tintColor }]} source={image} /> <Text 
style={styles.deployedTitle}>{title}</Text> </View> <ListItem title={actionTitle} showArrow={true} onPress={action} isLast={true} /> </View> ); } chooseCluster(cluster) { this.setState({ loading: true, error: null, deployed: false, selectedCluster: cluster, loadingMessage: intl('deploy_loading_deployments'), }); DeploymentsActions.fetchDeployments(cluster) .then(dps => { const tillerDP = dps && dps.find(dp => dp.getIn(['metadata', 'name']) === 'tiller-deploy'); if (!tillerDP) { return new Promise((resolve, reject) => { Alert.alert( intl('deploy_no_tiller_dp_alert_title'), intl('deploy_no_tiller_dp_alert_subtitle'), [ { text: intl('cancel'), onPress: () => this.setState({ loading: false }), }, { text: intl('ok'), onPress: () => { this.createTillerDeploy(cluster) .then(deployment => { return this.createTillerSVC({ cluster, deployment }); }) .catch(e => { reject(e); }) .then(service => resolve(service)); }, }, ] ); }); } return this.findService({ cluster, deployment: tillerDP }); }) .then(service => { this.setState({ loading: true, loadingMessage: intl('deploy_loading_deploy_chart'), }); return this.deployChart({ chart: this.props.chart, service, cluster }); }) .then(() => { ClustersActions.fetchClusterEntities(cluster); this.setState({ deployed: true }); }) .catch(e => { this.setState({ error: e }); // SnackbarUtils.showError({title: e.message}); }) .finally(() => { this.deployTries = MAX_RETRIES; this.setState({ loading: false }); }); } findService({ cluster, deployment }) { this.setState({ loading: true, loadingMessage: intl('deploy_loading_service'), }); return ServicesActions.fetchServices(cluster).then(svcs => { const tillerSVC = svcs && svcs.find( svc => svc.getIn(['metadata', 'labels', 'run']) === deployment.getIn(['metadata', 'name']) ); if (!tillerSVC) { return new Promise(resolve => { Alert.alert( intl('deploy_no_tiller_svc_alert_title'), intl('deploy_no_tiller_svc_alert_subtitle'), [ { text: intl('cancel'), onPress: () => this.setState({ loading: false }), }, { text: intl('ok'), onPress: () => this.createTillerSVC({ cluster, deployment }).then(service => resolve(service) ), }, ] ); }); } return tillerSVC; }); } createTillerDeploy(cluster) { this.setState({ loading: true, loadingMessage: intl('deploy_loading_create_deploy'), }); return DeploymentsActions.createDeployment({ cluster, name: 'tiller-deploy', image: 'gcr.io/kubernetes-helm/tiller:v2.0.0', namespace: 'kube-system', }); } createTillerSVC({ cluster, deployment }) { this.setState({ loading: true, loadingMessage: intl('deploy_loading_create_service'), }); return ServicesActions.createService({ cluster, deployment, type: 'NodePort', port: 44134, name: deployment.getIn(['metadata', 'name']), }); } deployChart({ chart, service, cluster }) { return new Promise((resolve, reject) => { this.sendChart({ chart, service, cluster, resolve, reject }); }); } sendChart({ chart, service, cluster, resolve, reject }) { this.deployTries--; BaseApi.deployChart({ chart, service, cluster }) .then(r => { resolve(r); }) .catch(e => { if (this.deployTries > 0) { setTimeout(() => { this.sendChart({ chart, service, cluster, resolve, reject }); }, 3000); } else { reject(e); } }); } openCluster(cluster) { this.props.navigator.popToRoot(); NavigationUtils.selectTab(0, this.props.navigator); NavigationUtils.pushOnTab(0, { screen: 'cabin.ClustersShow', title: cluster.get('name'), backButtonTitle: '', passProps: { cluster }, }); } }
DeployClustersContainer
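The deploy flow above retries sendChart with a fixed 3-second delay until deployTries is exhausted; here is a minimal standalone sketch of that pattern (the helper name is illustrative, not part of the app):

// Illustrative retry helper mirroring sendChart(): retry a promise-returning
// call up to `tries` times, waiting `delayMs` between attempts.
function withRetries(fn, tries = 3, delayMs = 3000) {
  return fn().catch(e => {
    if (tries <= 1) throw e;
    return new Promise(resolve => setTimeout(resolve, delayMs)).then(() =>
      withRetries(fn, tries - 1, delayMs)
    );
  });
}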
signable.go
// Copyright Fuzamei Corp. 2018 All Rights Reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package types import ( "bytes" "encoding/json" "errors" "fmt" "io" "time" "github.com/33cn/chain33/common/address" "github.com/33cn/chain33/common/crypto" "github.com/33cn/chain33/common/log/log15" ) // error defines var ( ErrNotifyInvalidValidatorAddress = errors.New("Invalid validator address for notify") ErrNotifyInvalidValidatorIndex = errors.New("Invalid validator index for notify") ErrNotifyInvalidSignature = errors.New("Invalid notify signature") ErrVoteInvalidValidatorIndex = errors.New("Invalid validator index for vote") ErrVoteInvalidValidatorAddress = errors.New("Invalid validator address for vote") ErrVoteInvalidSignature = errors.New("Invalid vote signature") ErrVoteNil = errors.New("Nil vote") votelog = log15.New("module", "tendermint-vote") ConsensusCrypto crypto.Crypto SecureConnCrypto crypto.Crypto ) // Signable is an interface for all signable things. // It typically removes signatures before serializing. type Signable interface { WriteSignBytes(chainID string, w io.Writer, n *int, err *error) } // SignBytes is a convenience method for getting the bytes to sign of a Signable. func SignBytes(chainID string, o Signable) []byte { buf, n, err := new(bytes.Buffer), new(int), new(error) o.WriteSignBytes(chainID, buf, n, err) if *err != nil { PanicCrisis(err) } return buf.Bytes() } // Vote Represents a vote from validators for consensus. type Vote struct { *DPosVote } // WriteSignBytes ... func (vote *Vote) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) { if *err != nil { return } canonical := CanonicalJSONOnceVote{ chainID, CanonicalVote(vote), } byteVote, e := json.Marshal(&canonical) if e != nil { *err = e votelog.Error("vote WriteSignBytes marshal failed", "err", e) return } number, writeErr := w.Write(byteVote) *n = number *err = writeErr } // Copy ... func (vote *Vote) Copy() *Vote { voteCopy := *vote return &voteCopy
} func (vote *Vote) String() string { if vote == nil { return "nil-Vote" } return fmt.Sprintf("Vote{VotedNodeIndex:%v, VotedNodeAddr:%X,Cycle[%v,%v],Period[%v,%v],StartHeight:%v,VoteId:%X,VoteTimeStamp:%v,VoteNodeIndex:%v,VoteNodeAddr:%X,Sig:%X}", vote.VoteItem.VotedNodeIndex, Fingerprint(vote.VoteItem.VotedNodeAddress), vote.VoteItem.CycleStart, vote.VoteItem.CycleStop, vote.VoteItem.PeriodStart, vote.VoteItem.PeriodStop, vote.VoteItem.Height, Fingerprint(vote.VoteItem.VoteID), CanonicalTime(time.Unix(0, vote.VoteTimestamp)), vote.VoterNodeIndex, Fingerprint(vote.VoterNodeAddress), Fingerprint(vote.Signature), ) } // Verify ... func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { addr := address.BytesToBtcAddress(address.NormalVer, pubKey.Bytes()).Hash160[:] if !bytes.Equal(addr, vote.VoterNodeAddress) { return ErrVoteInvalidValidatorAddress } sig, err := ConsensusCrypto.SignatureFromBytes(vote.Signature) if err != nil { votelog.Error("vote Verify failed", "err", err) return err } if !pubKey.VerifyBytes(SignBytes(chainID, vote), sig) { return ErrVoteInvalidSignature } return nil } // Hash ... func (vote *Vote) Hash() []byte { if vote == nil { //votelog.Error("vote hash is nil") return nil } bytes, err := json.Marshal(vote) if err != nil { votelog.Error("vote hash marshal failed", "err", err) return nil } return crypto.Ripemd160(bytes) } // Notify Represents a notify from validators for consensus. type Notify struct { *DPosNotify } // WriteSignBytes ... func (notify *Notify) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) { if *err != nil { return } canonical := CanonicalJSONOnceNotify{ chainID, CanonicalNotify(notify), } byteVote, e := json.Marshal(&canonical) if e != nil { *err = e votelog.Error("vote WriteSignBytes marshal failed", "err", e) return } number, writeErr := w.Write(byteVote) *n = number *err = writeErr } // Copy ... func (notify *Notify) Copy() *Notify { notifyCopy := *notify return &notifyCopy } func (notify *Notify) String() string { if notify == nil { return "nil-notify" } return fmt.Sprintf("Notify{VotedNodeIndex:%v, VotedNodeAddr:%X,Cycle[%v,%v],Period[%v,%v],StartHeight:%v,VoteId:%X,NotifyTimeStamp:%v,HeightStop:%v,NotifyNodeIndex:%v,NotifyNodeAddr:%X,Sig:%X}", notify.Vote.VotedNodeIndex, Fingerprint(notify.Vote.VotedNodeAddress), notify.Vote.CycleStart, notify.Vote.CycleStop, notify.Vote.PeriodStart, notify.Vote.PeriodStop, notify.Vote.Height, Fingerprint(notify.Vote.VoteID), CanonicalTime(time.Unix(0, notify.NotifyTimestamp)), notify.HeightStop, notify.NotifyNodeIndex, Fingerprint(notify.NotifyNodeAddress), Fingerprint(notify.Signature), ) } // Verify ... func (notify *Notify) Verify(chainID string, pubKey crypto.PubKey) error { addr := address.BytesToBtcAddress(address.NormalVer, pubKey.Bytes()).Hash160[:] if !bytes.Equal(addr, notify.NotifyNodeAddress) { return ErrNotifyInvalidValidatorAddress } sig, err := ConsensusCrypto.SignatureFromBytes(notify.Signature) if err != nil { votelog.Error("Notify Verify failed", "err", err) return err } if !pubKey.VerifyBytes(SignBytes(chainID, notify), sig) { return ErrNotifyInvalidSignature } return nil } // Hash ... func (notify *Notify) Hash() []byte { if notify == nil { //votelog.Error("vote hash is nil") return nil } bytes, err := json.Marshal(notify) if err != nil { votelog.Error("vote hash marshal failed", "err", err) return nil } return crypto.Ripemd160(bytes) }
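A hedged sketch of the sign-then-verify round trip for a Vote; the PrivKey methods shown mirror the crypto interfaces used elsewhere in this package and are assumptions here.

// Illustrative only: sign a vote's canonical bytes and confirm it verifies.
// Assumes crypto.PrivKey exposes Sign/PubKey as in the crypto package above.
func signAndCheck(chainID string, priv crypto.PrivKey, vote *Vote) error {
	sig := priv.Sign(SignBytes(chainID, vote))
	vote.Signature = sig.Bytes()
	return vote.Verify(chainID, priv.PubKey())
}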
main.rs
mod snn;
fn main() { println!("Hello, world!"); }
full_stack.rs
// This file is Copyright its original authors, visible in version control // history. // // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // You may not use this file except in accordance with one or both of these // licenses. //! Test that no series of bytes received over the wire/connections created/payments sent can //! result in a crash. We do this by standing up a node and then reading bytes from input to denote //! actions such as creating new inbound/outbound connections, bytes to be read from a connection, //! or payments to send/ways to handle events generated. //! This test has been very useful, though due to its complexity good starting inputs are critical. use bitcoin::blockdata::block::BlockHeader; use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::blockdata::script::{Builder, Script}; use bitcoin::blockdata::opcodes; use bitcoin::consensus::encode::deserialize; use bitcoin::network::constants::Network; use bitcoin::blockdata::constants::genesis_block; use bitcoin::hashes::Hash as TraitImport; use bitcoin::hashes::HashEngine as TraitImportEngine; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash}; use lightning::chain; use lightning::chain::{BestBlock, Confirm, Listen}; use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; use lightning::chain::chainmonitor; use lightning::chain::transaction::OutPoint; use lightning::chain::keysinterface::{InMemorySigner, KeysInterface}; use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::ln::channelmanager::{ChainParameters, ChannelManager}; use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor,IgnoringMessageHandler}; use lightning::ln::msgs::DecodeError; use lightning::ln::script::ShutdownScript; use lightning::routing::router::get_route; use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph}; use lightning::util::config::UserConfig; use lightning::util::errors::APIError; use lightning::util::events::Event; use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState}; use lightning::util::logger::Logger; use lightning::util::ser::Readable; use utils::test_logger; use utils::test_persister::TestPersister; use bitcoin::secp256k1::key::{PublicKey,SecretKey}; use bitcoin::secp256k1::recovery::RecoverableSignature; use bitcoin::secp256k1::Secp256k1; use std::cell::RefCell; use std::collections::{HashMap, hash_map}; use std::cmp; use std::sync::{Arc, Mutex}; use std::sync::atomic::{AtomicU64,AtomicUsize,Ordering}; #[inline] pub fn slice_to_be16(v: &[u8]) -> u16 { ((v[0] as u16) << 8*1) | ((v[1] as u16) << 8*0) } #[inline] pub fn slice_to_be24(v: &[u8]) -> u32 { ((v[0] as u32) << 8*2) | ((v[1] as u32) << 8*1) | ((v[2] as u32) << 8*0) } #[inline] pub fn slice_to_be32(v: &[u8]) -> u32 { ((v[0] as u32) << 8*3) | ((v[1] as u32) << 8*2) | ((v[2] as u32) << 8*1) | ((v[3] as u32) << 8*0) } #[inline] pub fn be64_to_array(u: u64) -> [u8; 8] { let mut v = [0; 8]; v[0] = ((u >> 8*7) & 0xff) as u8; v[1] = ((u >> 8*6) & 0xff) as u8; v[2] = ((u >> 8*5) & 0xff) as u8; v[3] = ((u >> 8*4) & 0xff) as u8; v[4] = ((u >> 8*3) & 0xff) as u8; v[5] = ((u >> 8*2) & 0xff) as u8; v[6] = ((u >> 8*1) & 0xff) as u8; v[7] = ((u >> 8*0) & 0xff) as u8; v } struct InputData { data: Vec<u8>, read_pos: 
AtomicUsize, } impl InputData { fn get_slice(&self, len: usize) -> Option<&[u8]> { let old_pos = self.read_pos.fetch_add(len, Ordering::AcqRel); if self.data.len() < old_pos + len { return None; } Some(&self.data[old_pos..old_pos + len]) } } struct FuzzEstimator { input: Arc<InputData>, } impl FeeEstimator for FuzzEstimator { fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 { //TODO: We should actually be testing at least much more than 64k... match self.input.get_slice(2) { Some(slice) => cmp::max(slice_to_be16(slice) as u32, 253), None => 253 } } } struct TestBroadcaster { txn_broadcasted: Mutex<Vec<Transaction>>, } impl BroadcasterInterface for TestBroadcaster { fn broadcast_transaction(&self, tx: &Transaction) { self.txn_broadcasted.lock().unwrap().push(tx.clone()); } } #[derive(Clone)] struct Peer<'a> { id: u8, peers_connected: &'a RefCell<[bool; 256]>, } impl<'a> SocketDescriptor for Peer<'a> { fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize { data.len() } fn disconnect_socket(&mut self) { assert!(self.peers_connected.borrow()[self.id as usize]); self.peers_connected.borrow_mut()[self.id as usize] = false; } } impl<'a> PartialEq for Peer<'a> { fn eq(&self, other: &Self) -> bool { self.id == other.id } } impl<'a> Eq for Peer<'a> {} impl<'a> std::hash::Hash for Peer<'a> { fn hash<H : std::hash::Hasher>(&self, h: &mut H) { self.id.hash(h) } } type ChannelMan = ChannelManager< EnforcingSigner, Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>; type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan>, Arc<NetGraphMsgHandler<Arc<dyn chain::Access>, Arc<dyn Logger>>>, Arc<dyn Logger>, IgnoringMessageHandler>; struct MoneyLossDetector<'a> { manager: Arc<ChannelMan>, monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>, handler: PeerMan<'a>, peers: &'a RefCell<[bool; 256]>, funding_txn: Vec<Transaction>, txids_confirmed: HashMap<Txid, usize>, header_hashes: Vec<(BlockHash, u32)>, height: usize, max_height: usize, blocks_connected: u32, } impl<'a> MoneyLossDetector<'a> { pub fn new(peers: &'a RefCell<[bool; 256]>, manager: Arc<ChannelMan>, monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>, handler: PeerMan<'a>) -> Self { MoneyLossDetector { manager, monitor, handler, peers, funding_txn: Vec::new(), txids_confirmed: HashMap::new(), header_hashes: vec![(genesis_block(Network::Bitcoin).block_hash(), 0)], height: 0, max_height: 0, blocks_connected: 0, } } fn connect_block(&mut self, all_txn: &[Transaction]) { let mut txdata = Vec::with_capacity(all_txn.len()); for (idx, tx) in all_txn.iter().enumerate() { let txid = tx.txid(); match self.txids_confirmed.entry(txid) { hash_map::Entry::Vacant(e) => { e.insert(self.height); txdata.push((idx + 1, tx)); }, _ => {}, } } self.blocks_connected += 1; let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height].0, merkle_root: Default::default(), time: self.blocks_connected, bits: 42, nonce: 42 }; self.height += 1; self.manager.transactions_confirmed(&header, &txdata, self.height as u32); self.manager.best_block_updated(&header, self.height as u32); 
(*self.monitor).transactions_confirmed(&header, &txdata, self.height as u32); (*self.monitor).best_block_updated(&header, self.height as u32); if self.header_hashes.len() > self.height { self.header_hashes[self.height] = (header.block_hash(), self.blocks_connected); } else { assert_eq!(self.header_hashes.len(), self.height); self.header_hashes.push((header.block_hash(), self.blocks_connected)); } self.max_height = cmp::max(self.height, self.max_height); } fn disconnect_block(&mut self) { if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) { let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height - 1].0, merkle_root: Default::default(), time: self.header_hashes[self.height].1, bits: 42, nonce: 42 }; self.manager.block_disconnected(&header, self.height as u32); self.monitor.block_disconnected(&header, self.height as u32); self.height -= 1; let removal_height = self.height; self.txids_confirmed.retain(|_, height| { removal_height != *height }); } } } impl<'a> Drop for MoneyLossDetector<'a> { fn drop(&mut self) { if !::std::thread::panicking() { // Disconnect all peers for (idx, peer) in self.peers.borrow().iter().enumerate() { if *peer { self.handler.socket_disconnected(&Peer{id: idx as u8, peers_connected: &self.peers}); } } // Force all channels onto the chain (and time out claim txn) self.manager.force_close_all_channels(); } } } struct KeyProvider { node_secret: SecretKey, counter: AtomicU64, } impl KeysInterface for KeyProvider { type Signer = EnforcingSigner; fn get_node_secret(&self) -> SecretKey { self.node_secret.clone() } fn get_destination_script(&self) -> Script { let secp_ctx = Secp256k1::signing_only(); let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(); let our_channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize()); Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script() } fn get_shutdown_scriptpubkey(&self) -> ShutdownScript { let secp_ctx = Secp256k1::signing_only(); let secret_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]).unwrap(); let pubkey_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &secret_key).serialize()); ShutdownScript::new_p2wpkh(&pubkey_hash) } fn get_channel_signer(&self, inbound: bool, channel_value_satoshis: u64) -> EnforcingSigner { let ctr = self.counter.fetch_add(1, Ordering::Relaxed) as u8; let secp_ctx = Secp256k1::signing_only(); EnforcingSigner::new(if inbound { InMemorySigner::new( &secp_ctx, SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ctr]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, ctr]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, ctr]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, ctr]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, ctr]).unwrap(), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, ctr], 
channel_value_satoshis, [0; 32] ) } else { InMemorySigner::new( &secp_ctx, SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, ctr]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, ctr]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, ctr]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, ctr]).unwrap(), SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, ctr]).unwrap(), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, ctr], channel_value_satoshis, [0; 32] ) }) } fn get_secure_random_bytes(&self) -> [u8; 32] { let ctr = self.counter.fetch_add(1, Ordering::Relaxed); [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, (ctr >> 8*7) as u8, (ctr >> 8*6) as u8, (ctr >> 8*5) as u8, (ctr >> 8*4) as u8, (ctr >> 8*3) as u8, (ctr >> 8*2) as u8, (ctr >> 8*1) as u8, 14, (ctr >> 8*0) as u8] } fn read_chan_signer(&self, mut data: &[u8]) -> Result<EnforcingSigner, DecodeError> { let inner: InMemorySigner = Readable::read(&mut data)?; let state = Arc::new(Mutex::new(EnforcementState::new())); Ok(EnforcingSigner::new_with_revoked( inner, state, false )) } fn sign_invoice(&self, _invoice_preimage: Vec<u8>) -> Result<RecoverableSignature, ()> { unreachable!() } } #[inline] pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) { let input = Arc::new(InputData { data: data.to_vec(), read_pos: AtomicUsize::new(0), }); let fee_est = Arc::new(FuzzEstimator { input: input.clone(), }); macro_rules! get_slice { ($len: expr) => { match input.get_slice($len as usize) { Some(slice) => slice, None => return, } } } macro_rules! 
get_pubkey { () => { match PublicKey::from_slice(get_slice!(33)) { Ok(key) => key, Err(_) => return, } } } let our_network_key = match SecretKey::from_slice(get_slice!(32)) { Ok(key) => key, Err(_) => return, }; let broadcast = Arc::new(TestBroadcaster{ txn_broadcasted: Mutex::new(Vec::new()) }); let monitor = Arc::new(chainmonitor::ChainMonitor::new(None, broadcast.clone(), Arc::clone(&logger), fee_est.clone(), Arc::new(TestPersister{}))); let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), counter: AtomicU64::new(0) }); let mut config = UserConfig::default(); config.channel_options.forwarding_fee_proportional_millionths = slice_to_be32(get_slice!(4)); config.channel_options.announced_channel = get_slice!(1)[0] != 0; let network = Network::Bitcoin; let params = ChainParameters { network, best_block: BestBlock::from_genesis(network), }; let channelmanager = Arc::new(ChannelManager::new(fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params)); let our_id = PublicKey::from_secret_key(&Secp256k1::signing_only(), &keys_manager.get_node_secret()); let network_graph = NetworkGraph::new(genesis_block(network).block_hash()); let net_graph_msg_handler = Arc::new(NetGraphMsgHandler::new(network_graph, None, Arc::clone(&logger))); let peers = RefCell::new([false; 256]); let mut loss_detector = MoneyLossDetector::new(&peers, channelmanager.clone(), monitor.clone(), PeerManager::new(MessageHandler { chan_handler: channelmanager.clone(), route_handler: net_graph_msg_handler.clone(), }, our_network_key, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0], Arc::clone(&logger), IgnoringMessageHandler{})); let mut should_forward = false; let mut payments_received: Vec<PaymentHash> = Vec::new(); let mut payments_sent = 0; let mut pending_funding_generation: Vec<([u8; 32], u64, Script)> = Vec::new(); let mut pending_funding_signatures = HashMap::new(); loop { match get_slice!(1)[0] { 0 => { let mut new_id = 0; for i in 1..256 { if !peers.borrow()[i-1] { new_id = i; break; } } if new_id == 0 { return; } loss_detector.handler.new_outbound_connection(get_pubkey!(), Peer{id: (new_id - 1) as u8, peers_connected: &peers}).unwrap(); peers.borrow_mut()[new_id - 1] = true; }, 1 => { let mut new_id = 0; for i in 1..256 { if !peers.borrow()[i-1] { new_id = i; break; } } if new_id == 0 { return; } loss_detector.handler.new_inbound_connection(Peer{id: (new_id - 1) as u8, peers_connected: &peers}).unwrap(); peers.borrow_mut()[new_id - 1] = true; }, 2 => { let peer_id = get_slice!(1)[0]; if !peers.borrow()[peer_id as usize] { return; } loss_detector.handler.socket_disconnected(&Peer{id: peer_id, peers_connected: &peers}); peers.borrow_mut()[peer_id as usize] = false; }, 3 => { let peer_id = get_slice!(1)[0]; if !peers.borrow()[peer_id as usize] { return; } match loss_detector.handler.read_event(&mut Peer{id: peer_id, peers_connected: &peers}, get_slice!(get_slice!(1)[0])) { Ok(res) => assert!(!res), Err(_) => { peers.borrow_mut()[peer_id as usize] = false; } } }, 4 => { let value = slice_to_be24(get_slice!(3)) as u64; let route = match get_route(&our_id, &net_graph_msg_handler.network_graph, &get_pubkey!(), None, None, &Vec::new(), value, 42, Arc::clone(&logger)) { Ok(route) => route, Err(_) => return, }; let mut payment_hash = PaymentHash([0; 32]); payment_hash.0[0..8].copy_from_slice(&be64_to_array(payments_sent)); let mut sha = Sha256::engine(); sha.input(&payment_hash.0[..]); payment_hash.0 
= Sha256::from_engine(sha).into_inner(); payments_sent += 1; match channelmanager.send_payment(&route, payment_hash, &None) { Ok(_) => {}, Err(_) => return, } }, 15 => { let value = slice_to_be24(get_slice!(3)) as u64; let mut route = match get_route(&our_id, &net_graph_msg_handler.network_graph, &get_pubkey!(), None, None, &Vec::new(), value, 42, Arc::clone(&logger)) { Ok(route) => route, Err(_) => return, }; route.paths.push(route.paths[0].clone()); let mut payment_hash = PaymentHash([0; 32]); payment_hash.0[0..8].copy_from_slice(&be64_to_array(payments_sent)); let mut sha = Sha256::engine(); sha.input(&payment_hash.0[..]); payment_hash.0 = Sha256::from_engine(sha).into_inner(); payments_sent += 1; let mut payment_secret = PaymentSecret([0; 32]); payment_secret.0[0..8].copy_from_slice(&be64_to_array(payments_sent)); payments_sent += 1; match channelmanager.send_payment(&route, payment_hash, &Some(payment_secret)) { Ok(_) => {}, Err(_) => return, } }, 5 => { let peer_id = get_slice!(1)[0]; if !peers.borrow()[peer_id as usize] { return; } let their_key = get_pubkey!(); let chan_value = slice_to_be24(get_slice!(3)) as u64; let push_msat_value = slice_to_be24(get_slice!(3)) as u64; if channelmanager.create_channel(their_key, chan_value, push_msat_value, 0, None).is_err() { return; } }, 6 => { let mut channels = channelmanager.list_channels(); let channel_id = get_slice!(1)[0] as usize; if channel_id >= channels.len() { return; } channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) }); if channelmanager.close_channel(&channels[channel_id].channel_id).is_err() { return; } }, 7 => { if should_forward { channelmanager.process_pending_htlc_forwards(); should_forward = false; } }, 8 => { for payment in payments_received.drain(..) { // SHA256 is defined as XOR of all input bytes placed in the first byte, and 0s // for the remaining bytes. Thus, if not all remaining bytes are 0s we cannot // fulfill this HTLC, but if they are, we can just take the first byte and // place that anywhere in our preimage. if &payment.0[1..] != &[0; 31] { channelmanager.fail_htlc_backwards(&payment); } else { let mut payment_preimage = PaymentPreimage([0; 32]); payment_preimage.0[0] = payment.0[0]; channelmanager.claim_funds(payment_preimage); } } }, 16 => { let payment_preimage = PaymentPreimage(keys_manager.get_secure_random_bytes()); let mut sha = Sha256::engine(); sha.input(&payment_preimage.0[..]); let payment_hash = PaymentHash(Sha256::from_engine(sha).into_inner()); // Note that this may fail - our hashes may collide and we'll end up trying to // double-register the same payment_hash. let _ = channelmanager.create_inbound_payment_for_hash(payment_hash, None, 1, 0); }, 9 => { for payment in payments_received.drain(..) { channelmanager.fail_htlc_backwards(&payment);
10 => { 'outer_loop: for funding_generation in pending_funding_generation.drain(..) { let mut tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: vec![TxOut { value: funding_generation.1, script_pubkey: funding_generation.2, }] }; let funding_output = 'search_loop: loop { let funding_txid = tx.txid(); if let None = loss_detector.txids_confirmed.get(&funding_txid) { let outpoint = OutPoint { txid: funding_txid, index: 0 }; for chan in channelmanager.list_channels() { if chan.channel_id == outpoint.to_channel_id() { tx.version += 1; continue 'search_loop; } } break outpoint; } tx.version += 1; if tx.version > 0xff { continue 'outer_loop; } }; if let Err(e) = channelmanager.funding_transaction_generated(&funding_generation.0, tx.clone()) { // It's possible the channel has been closed in the mean time, but any other // failure may be a bug. if let APIError::ChannelUnavailable { err } = e { assert_eq!(err, "No such channel"); } else { panic!(); } } pending_funding_signatures.insert(funding_output, tx); } }, 11 => { let mut txn = broadcast.txn_broadcasted.lock().unwrap().split_off(0); if !txn.is_empty() { loss_detector.connect_block(&txn[..]); for _ in 2..100 { loss_detector.connect_block(&[]); } } for tx in txn.drain(..) { loss_detector.funding_txn.push(tx); } }, 12 => { let txlen = slice_to_be16(get_slice!(2)); if txlen == 0 { loss_detector.connect_block(&[]); } else { let txres: Result<Transaction, _> = deserialize(get_slice!(txlen)); if let Ok(tx) = txres { let mut output_val = 0; for out in tx.output.iter() { if out.value > 21_000_000_0000_0000 { return; } output_val += out.value; if output_val > 21_000_000_0000_0000 { return; } } loss_detector.connect_block(&[tx]); } else { return; } } }, 13 => { loss_detector.disconnect_block(); }, 14 => { let mut channels = channelmanager.list_channels(); let channel_id = get_slice!(1)[0] as usize; if channel_id >= channels.len() { return; } channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) }); channelmanager.force_close_channel(&channels[channel_id].channel_id).unwrap(); }, // 15 is above _ => return, } loss_detector.handler.process_events(); for event in loss_detector.manager.get_and_clear_pending_events() { match event { Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, output_script, .. } => { pending_funding_generation.push((temporary_channel_id, channel_value_satoshis, output_script)); }, Event::PaymentReceived { payment_hash, .. } => { //TODO: enhance by fetching random amounts from fuzz input? 
payments_received.push(payment_hash); }, Event::PendingHTLCsForwardable {..} => { should_forward = true; }, _ => {}, } } } } pub fn full_stack_test<Out: test_logger::Output>(data: &[u8], out: Out) { let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new("".to_owned(), out)); do_test(data, &logger); } #[no_mangle] pub extern "C" fn full_stack_run(data: *const u8, datalen: usize) { let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new("".to_owned(), test_logger::DevNull {})); do_test(unsafe { std::slice::from_raw_parts(data, datalen) }, &logger); } #[cfg(test)] mod tests { use lightning::util::logger::{Logger, Record}; use std::collections::HashMap; use std::sync::{Arc, Mutex}; struct TrackingLogger { /// (module, message) -> count pub lines: Mutex<HashMap<(String, String), usize>>, } impl Logger for TrackingLogger { fn log(&self, record: &Record) { *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1; println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args); } } #[test] fn test_no_existing_test_breakage() { // To avoid accidentally causing all existing fuzz test cases to be useless by making minor // changes (such as requesting feerate info in a new place), we run a pretty full // step-through with two peers and HTLC forwarding here. Obviously this is pretty finicky, // so this should be updated pretty liberally, but at least we'll know when changes occur. // If nothing else, this test serves as a pretty great initial full_stack_target seed. // What each byte represents is broken down below, and then everything is concatenated into // one large test at the end (you want %s/ -.*//g %s/\n\| \|\t\|\///g). // Following BOLT 8, lightning message on the wire are: 2-byte encrypted message length + // 16-byte MAC of the encrypted message length + encrypted Lightning message + 16-byte MAC // of the Lightning message // I.e 2nd inbound read, len 18 : 0006 (encrypted message length) + 03000000000000000000000000000000 (MAC of the encrypted message length) // Len 22 : 0010 00000000 (encrypted lightning message) + 03000000000000000000000000000000 (MAC of the Lightning message) // Writing new code generating transactions and see a new failure ? Don't forget to add input for the FuzzEstimator ! 
// 0100000000000000000000000000000000000000000000000000000000000000 - our network key // 00000000 - fee_proportional_millionths // 01 - announce_channels_publicly // // 00 - new outbound connection with id 0 // 030000000000000000000000000000000000000000000000000000000000000002 - peer's pubkey // 030032 - inbound read from peer id 0 of len 50 // 00 030000000000000000000000000000000000000000000000000000000000000002 03000000000000000000000000000000 - noise act two (0||pubkey||mac) // // 030012 - inbound read from peer id 0 of len 18 // 000a 03000000000000000000000000000000 - message header indicating message length 10 // 03001a - inbound read from peer id 0 of len 26 // 0010 00022000 00022000 03000000000000000000000000000000 - init message (type 16) with static_remotekey (0x2000) and mac // // 030012 - inbound read from peer id 0 of len 18 // 0141 03000000000000000000000000000000 - message header indicating message length 321 // 0300fe - inbound read from peer id 0 of len 254 // 0020 7500000000000000000000000000000000000000000000000000000000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000162 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004 - beginning of open_channel message // 030053 - inbound read from peer id 0 of len 83 // 030000000000000000000000000000000000000000000000000000000000000005 020900000000000000000000000000000000000000000000000000000000000000 01 03000000000000000000000000000000 - rest of open_channel and mac // // 00fd00fd00fd - Three feerate requests (all returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator) // - client should now respond with accept_channel (CHECK 1: type 33 to peer 03000000) // // 030012 - inbound read from peer id 0 of len 18 // 0084 03000000000000000000000000000000 - message header indicating message length 132 // 030094 - inbound read from peer id 0 of len 148 // 0022 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 3d00000000000000000000000000000000000000000000000000000000000000 0000 00000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - funding_created and mac // - client should now respond with funding_signed (CHECK 2: type 35 to peer 03000000) // // 0c005e - connect a block with one transaction of len 94 // 020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae0000000000000000000000000000000000000000000000000000000000000000000000 - the funding transaction // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // - by now client should have sent a 
funding_locked (CHECK 3: SendFundingLocked to 03000000 for chan 3d000000) // // 030012 - inbound read from peer id 0 of len 18 // 0043 03000000000000000000000000000000 - message header indicating message length 67 // 030053 - inbound read from peer id 0 of len 83 // 0024 3d00000000000000000000000000000000000000000000000000000000000000 020800000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - funding_locked and mac // // 01 - new inbound connection with id 1 // 030132 - inbound read from peer id 1 of len 50 // 0003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000 - inbound noise act 1 // 030142 - inbound read from peer id 1 of len 66 // 000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000 - inbound noise act 3 // // 030112 - inbound read from peer id 1 of len 18 // 000a 01000000000000000000000000000000 - message header indicating message length 10 // 03011a - inbound read from peer id 1 of len 26 // 0010 00022000 00022000 01000000000000000000000000000000 - init message (type 16) with static_remotekey (0x2000) and mac // // 05 01 030200000000000000000000000000000000000000000000000000000000000000 00c350 0003e8 - create outbound channel to peer 1 for 50k sat // 00fd - One feerate requests (all returning min feerate) (gonna be ingested by FuzzEstimator) // // 030112 - inbound read from peer id 1 of len 18 // 0110 01000000000000000000000000000000 - message header indicating message length 272 // 0301ff - inbound read from peer id 1 of len 255 // 0021 0000000000000000000000000000000000000000000000000000000000000e05 0000000000000162 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 02660000000000000000000000000000 - beginning of accept_channel // 030121 - inbound read from peer id 1 of len 33 // 0000000000000000000000000000000000 01000000000000000000000000000000 - rest of accept_channel and mac // // 0a - create the funding transaction (client should send funding_created now) // // 030112 - inbound read from peer id 1 of len 18 // 0062 01000000000000000000000000000000 - message header indicating message length 98 // 030172 - inbound read from peer id 1 of len 114 // 0023 3a00000000000000000000000000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - funding_signed message and mac // // 0b - broadcast funding transaction // - by now client should have sent a funding_locked (CHECK 4: SendFundingLocked to 03020000 for chan 3f000000) // // 030112 - inbound read from peer id 1 of len 18 // 0043 01000000000000000000000000000000 - message header indicating message length 67 // 030153 - inbound read from peer id 1 of len 83 // 0024 3a00000000000000000000000000000000000000000000000000000000000000 026700000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - funding_locked and mac // // 030012 - inbound read from peer id 0 of len 18 // 05ac 03000000000000000000000000000000 - message header indicating 
message length 1452 // 0300ff - inbound read from peer id 0 of len 255 // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000000 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300c1 - inbound read from peer id 0 of len 193 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 4e00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac // // 030012 - inbound read from peer id 0 of len 18 // 0064 03000000000000000000000000000000 - message header indicating message length 100 // 
030074 - inbound read from peer id 0 of len 116 // 0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000300100000000000000000000000000000000000000000000000000000000000000 0000 03000000000000000000000000000000 - commitment_signed and mac // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6: types 133 and 132 to peer 03000000) // // 030012 - inbound read from peer id 0 of len 18 // 0063 03000000000000000000000000000000 - message header indicating message length 99 // 030073 - inbound read from peer id 0 of len 115 // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0900000000000000000000000000000000000000000000000000000000000000 020b00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac // // 07 - process the now-pending HTLC forward // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: SendHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000) // // - we respond with commitment_signed then revoke_and_ack (a weird, but valid, order) // 030112 - inbound read from peer id 1 of len 18 // 0064 01000000000000000000000000000000 - message header indicating message length 100 // 030174 - inbound read from peer id 1 of len 116 // 0084 3a00000000000000000000000000000000000000000000000000000000000000 000000000000000000000000000000000000000000000000000000000000006a0001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000 - commitment_signed and mac // // 030112 - inbound read from peer id 1 of len 18 // 0063 01000000000000000000000000000000 - message header indicating message length 99 // 030173 - inbound read from peer id 1 of len 115 // 0085 3a00000000000000000000000000000000000000000000000000000000000000 6600000000000000000000000000000000000000000000000000000000000000 026400000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac // // 030112 - inbound read from peer id 1 of len 18 // 004a 01000000000000000000000000000000 - message header indicating message length 74 // 03015a - inbound read from peer id 1 of len 90 // 0082 3a00000000000000000000000000000000000000000000000000000000000000 0000000000000000 ff00888888888888888888888888888888888888888888888888888888888888 01000000000000000000000000000000 - update_fulfill_htlc and mac // - client should immediately claim the pending HTLC from peer 0 (CHECK 8: SendFulfillHTLCs for node 03000000 with preimage ff00888888 for channel 3d000000) // // 030112 - inbound read from peer id 1 of len 18 // 0064 01000000000000000000000000000000 - message header indicating message length 100 // 030174 - inbound read from peer id 1 of len 116 // 0084 3a00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000100001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000 - commitment_signed and mac // // 030112 - inbound read from peer id 1 of len 18 // 0063 01000000000000000000000000000000 - message header indicating message length 99 // 030173 - inbound read from peer id 1 of len 115 // 0085 3a00000000000000000000000000000000000000000000000000000000000000 6700000000000000000000000000000000000000000000000000000000000000 026500000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - 
revoke_and_ack and mac // // - before responding to the commitment_signed generated above, send a new HTLC // 030012 - inbound read from peer id 0 of len 18 // 05ac 03000000000000000000000000000000 - message header indicating message length 1452 // 0300ff - inbound read from peer id 0 of len 255 // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300c1 - inbound read from peer id 0 of len 193 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 4e00000000000000000000000000000000000000000000000000000000000000 
03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac // // - now respond to the update_fulfill_htlc+commitment_signed messages the client sent to peer 0 // 030012 - inbound read from peer id 0 of len 18 // 0063 03000000000000000000000000000000 - message header indicating message length 99 // 030073 - inbound read from peer id 0 of len 115 // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0800000000000000000000000000000000000000000000000000000000000000 020a00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6 duplicates) // // 030012 - inbound read from peer id 0 of len 18 // 0064 03000000000000000000000000000000 - message header indicating message length 100 // 030074 - inbound read from peer id 0 of len 116 // 0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000c30100000000000000000000000000000000000000000000000000000000000000 0000 03000000000000000000000000000000 - commitment_signed and mac // // 030012 - inbound read from peer id 0 of len 18 // 0063 03000000000000000000000000000000 - message header indicating message length 99 // 030073 - inbound read from peer id 0 of len 115 // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0b00000000000000000000000000000000000000000000000000000000000000 020d00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac // // 07 - process the now-pending HTLC forward // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate) // - we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc // // 030112 - inbound read from peer id 1 of len 18 // 0064 01000000000000000000000000000000 - message header indicating message length 100 // 030174 - inbound read from peer id 1 of len 116 // 0084 3a00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000390001000000000000000000000000000000000000000000000000000000000000 0000 01000000000000000000000000000000 - commitment_signed and mac // // 030112 - inbound read from peer id 1 of len 18 // 0063 01000000000000000000000000000000 - message header indicating message length 99 // 030173 - inbound read from peer id 1 of len 115 // 0085 3a00000000000000000000000000000000000000000000000000000000000000 6400000000000000000000000000000000000000000000000000000000000000 027000000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac // // 030112 - inbound read from peer id 1 of len 18 // 002c 01000000000000000000000000000000 - message header indicating message length 44 // 03013c - inbound read from peer id 1 of len 60 // 0083 3a00000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000 01000000000000000000000000000000 - update_fail_htlc and mac // // 030112 - inbound read from peer id 1 of len 18 // 0064 01000000000000000000000000000000 - message header indicating message length 100 // 030174 - inbound read from peer id 1 of len 116 // 0084 3a00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000390001000000000000000000000000000000000000000000000000000000000000 0000 
01000000000000000000000000000000 - commitment_signed and mac // // 030112 - inbound read from peer id 1 of len 18 // 0063 01000000000000000000000000000000 - message header indicating message length 99 // 030173 - inbound read from peer id 1 of len 115 // 0085 3a00000000000000000000000000000000000000000000000000000000000000 6500000000000000000000000000000000000000000000000000000000000000 027100000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac // // 07 - process the now-pending HTLC forward // - client now sends id 0 update_fail_htlc and commitment_signed (CHECK 9) // - now respond to the update_fail_htlc+commitment_signed messages the client sent to peer 0 // // 030012 - inbound read from peer id 0 of len 18 // 0063 03000000000000000000000000000000 - message header indicating message length 99 // 030073 - inbound read from peer id 0 of len 115 // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0a00000000000000000000000000000000000000000000000000000000000000 020c00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac // // 030012 - inbound read from peer id 0 of len 18 // 0064 03000000000000000000000000000000 - message header indicating message length 100 // 030074 - inbound read from peer id 0 of len 116 // 0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000320100000000000000000000000000000000000000000000000000000000000000 0000 03000000000000000000000000000000 - commitment_signed and mac // - client should now respond with revoke_and_ack (CHECK 5 duplicate) // // 030012 - inbound read from peer id 0 of len 18 // 05ac 03000000000000000000000000000000 - message header indicating message length 1452 // 0300ff - inbound read from peer id 0 of len 255 // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000002 00000000000b0838 ff00000000000000000000000000000000000000000000000000000000000000 000003f0 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e000001000000000000000927c0000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300ff - inbound read from peer id 0 of len 255 // 
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300ff - inbound read from peer id 0 of len 255 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff // 0300c1 - inbound read from peer id 0 of len 193 // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff 4b00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac // // 030012 - inbound read from peer id 0 of len 18 // 00a4 03000000000000000000000000000000 - message header indicating message length 164 // 0300b4 - inbound read from peer id 0 of len 180 // 0084 3d00000000000000000000000000000000000000000000000000000000000000 00000000000000000000000000000000000000000000000000000000000000750100000000000000000000000000000000000000000000000000000000000000 0001 00000000000000000000000000000000000000000000000000000000000000670500000000000000000000000000000000000000000000000000000000000006 03000000000000000000000000000000 - commitment_signed and mac // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6 duplicates) // // 030012 - inbound read from peer id 0 of len 18 // 0063 03000000000000000000000000000000 - message header indicating message length 99 // 030073 - inbound read from peer id 0 of len 115 // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0d00000000000000000000000000000000000000000000000000000000000000 020f00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac // // 07 - process the now-pending HTLC forward // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate) // // 0c007d - connect a block with one transaction of len 125 // 
02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c0000000000000160014280000000000000000000000000000000000000005000020 - the commitment transaction for channel 3f00000000000000000000000000000000000000000000000000000000000000 // // 0c005e - connect a block with one transaction of len 94 // 0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b20000000000000000000000000000000000000000000000000000000000000000000000 - the HTLC timeout transaction // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // 0c0000 - connect a block with no transactions // // 07 - process the now-pending HTLC forward // - client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10) let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) }); super::do_test(&::hex::decode("01000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000020300320003000000000000000000000000000000000000000000000000000000000000000203000000000000000000000000000000030012000a0300000000000000000000000000000003001a00100002200000022000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000162ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005020900000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000210100000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d0000000000000000000000000000000000000000000000000000000000000002080000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000010301320003000000000000000000000000000000000000000000000000000000000000000703000000000000000000000000000000030142000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000030112000a0100000000000000000000000000000003011a0010000220000002200001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd030112011001000000000000000000000000000000030
1ff00210000000000000000000000000000000000000000000000000000000000000e05000000000000016200000000004c4b4000000000000003e800000000000003e80000000203f00005030000000000000000000000000000000000000000000000000000000000000100030000000000000000000000000000000000000000000000000000000000000200030000000000000000000000000000000000000000000000000000000000000300030000000000000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500026600000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007c0001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000b03011200430100000000000000000000000000000003015300243a000000000000000000000000000000000000000000000000000000000000000267000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000020b00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006a000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000660000000000000000000000000000000000000000000000000000000000000002640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a00823a000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a0000000000000000000000000000000000000000000000000000000000000067000000000000000000000000000000000000000000000000000000000000000265000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4e000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020a000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c3010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000b00000000000000000000000000000000000000000000000000000000000000020d00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a00000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833a00000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843a000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000039000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853a000000000000000000000000000000000000000000000000000000000000006500000000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000020c000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000032010000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d00000000000000000000000000000000000000000000000000000000000000000000000000000200000000000b0838ff00000000000000000000000000000000000000000000000000000000000000000003f0000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000927c0000000a00000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4b000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007501000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006705000000000000000000000000000000000000000000000000000000000000060300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000d00000000000000000000000000000000000000000000000000000000000000020f0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000070c007d02000000013a000000000000000000000000000000000000000000000000000000000000000000000000000000800258020000000000002200204b0000000000000000000000000000000000000000000000000000000000000014c00000000000001600142800000000000000000000000000000000000000050000200c005e0200000001730000000000000000000000000000000000000000000000000000000000000000000000000000000001a701000000000000220020b200000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<dyn Logger>)); let log_entries = logger.lines.lock().unwrap(); assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingSigned event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 2 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingLocked event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 3 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingLocked event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 for channel 3a00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 4 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendRevokeAndACK event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&4)); // 5 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 0 fulfills, 0 fails for channel 
3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 6 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 with 1 adds, 0 fulfills, 0 fails for channel 3a00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 7 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 1 fulfills, 0 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 8 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 0 fulfills, 1 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // 9 assert_eq!(log_entries.get(&("lightning::chain::channelmonitor".to_string(), "Input spending counterparty commitment tx (0000000000000000000000000000000000000000000000000000000000000073:0) in 0000000000000000000000000000000000000000000000000000000000000067 resolves outbound HTLC with payment hash ff00000000000000000000000000000000000000000000000000000000000000 with timeout".to_string())), Some(&1)); // 10 } }
49ec6969af2649b03c6f5433a86389dfcd10d971.js
mycallback( {"CONTRIBUTOR OCCUPATION": "VP MTI Research & Development", "CONTRIBUTION AMOUNT (F3L Bundled)": "150.00", "ELECTION CODE": "", "MEMO CODE": "", "CONTRIBUTOR EMPLOYER": "Mylan Technologies Inc.", "DONOR CANDIDATE STATE": "", "CONTRIBUTOR STREET 1": "346 Vitez Dr", "CONTRIBUTOR MIDDLE NAME": "E", "DONOR CANDIDATE FEC ID": "", "DONOR CANDIDATE MIDDLE NAME": "", "CONTRIBUTOR STATE": "WV", "DONOR CANDIDATE FIRST NAME": "", "CONTRIBUTOR FIRST NAME": "Michael", "BACK REFERENCE SCHED NAME": "", "DONOR CANDIDATE DISTRICT": "", "CONTRIBUTION DATE": "20100930", "DONOR COMMITTEE NAME": "", "MEMO TEXT/DESCRIPTION": "P/R Deduction ($0.00 )", "Reference to SI or SL system code that identifies the Account": "", "FILER COMMITTEE ID NUMBER": "C00332395", "DONOR CANDIDATE LAST NAME": "", "CONTRIBUTOR LAST NAME": "Houghton", "_record_type": "fec.version.v7_0.SA", "CONDUIT STREET2": "", "CONDUIT STREET1": "", "DONOR COMMITTEE FEC ID": "", "CONTRIBUTION PURPOSE DESCRIP": "", "CONTRIBUTOR ZIP": "26508", "CONTRIBUTOR STREET 2": "", "CONDUIT CITY": "", "ENTITY TYPE": "IND", "CONTRIBUTOR CITY": "Morgantown", "CONTRIBUTOR SUFFIX": "", "TRANSACTION ID": "PR1481904520383", "DONOR CANDIDATE SUFFIX": "", "DONOR CANDIDATE OFFICE": "", "CONTRIBUTION PURPOSE CODE": "15", "ELECTION OTHER DESCRIPTION": "", "_src_file": "2011/20110411/721688.fec_1.yml", "CONDUIT STATE": "", "CONTRIBUTOR ORGANIZATION NAME": "", "BACK REFERENCE TRAN ID NUMBER": "", "DONOR CANDIDATE PREFIX": "", "CONTRIBUTOR PREFIX": "", "CONDUIT ZIP": "", "CONDUIT NAME": "", "CONTRIBUTION AGGREGATE F3L Semi-annual Bundled": "450.00", "FORM TYPE": "SA11ai"});
output.py
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 09 09:59:13 2017

@author: as624
"""

import csv
import os
import matplotlib.pyplot as plt
import numpy as np


def output_detection_figures(
        image, wells, bacteria, timeindex, output_dir):
    """
    Produces and saves figures showing the output from the detection

    Parameters
    ------
    image : ndarray (2D)
        The initial image that detection was run on
    wells : ndarray (2D) of dtype int
        A labelled image showing the detected wells
    bacteria : ndarray (2D) of dtype int
        A labelled image showing the detected bacteria
    timeindex : int
        The timepoint that has been analysed
    output_dir : str (path)
        Where to save the images
    """
    # For detection figures, labels not needed (I think)?
    plt.figure(figsize=(16, 12))
    plt.imshow(image, cmap='gray')
    plt.contour(wells > 0, levels=[0.5], colors=['y'])
    #plt.contour(channel>0, levels=[0.5], colors=['r'])
    for lab_bac in range(1, bacteria.max() + 1):
        col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)
        plt.contour(bacteria == lab_bac, levels=[0.5], colors=[col])
    plt.savefig(os.path.join(
        output_dir, "detection_frame_{:06d}".format(timeindex)))
    plt.close()


def output_tracking_figures(
        data, fullwellimages, wellcoords, allbacteria,
        output_dir, bacteria_lineage):
def final_output(measurements, output_dir):
    """outputs a final csv with information on the bacteria detected

    Parameters
    ------
    measurements : Custom class instance
        Its attribute "bacteria" is a dictionary containing information
        on each individual bacteria
    output_dir : str (path)
        Where to write the csv
    """
    output_csv_file = os.path.join(output_dir, 'Results.csv')
    with open(output_csv_file, "w", newline='') as file0:
        writer = csv.writer(file0)
        for numbac, bac in enumerate(measurements.bacteria.values()):
            if numbac == 0:
                writer.writerow(bac.headings_line)
            writer.writerow(bac.measurements_output)
""" Produces and saves figures showing the output after tracking Parameters ------ data : list of ndarrays List of initial image that detection was run on fullwellimages : list of ndarrays List of labelled images showing the detected wells wellcoords : list of arrays Each entry contains a further list where each entry contains well coordinates allbacteria : list of arrays List of labelled images showing the detected bacteria output_dir : str (path) Where to save the images bacteria_lineage : dictionary A dictionary that links the physical unique label of a bacteria to one which shows information on its lineage """ for tpoint, (image, fullwells, bacteria, coords) in enumerate( zip(data, fullwellimages, allbacteria, wellcoords)): # For detection figures, labels not needed (I think)? plt.figure(figsize=(16, 12)) plt.imshow(image, cmap='gray') if len(np.unique(fullwells)) == 1: plt.savefig(os.path.join( output_dir, "tracking_frame_{:06d}".format(tpoint))) plt.close() continue plt.contour(fullwells > 0, levels=[0.5], colors=['y']) bacteriaim = np.zeros_like(fullwells) for welllabel in coords: bacteriaim[coords[welllabel]] = bacteria[welllabel] # Add in well labels top left(?) of well contour #bw = fullwells == welllabel # if not np.any(bw): # continue #pos0 = bw.nonzero() pos = (np.min(coords[welllabel][0]), np.max(coords[welllabel][1])) plt.text(pos[1], pos[0], "%d" % welllabel, color="y") for lab_bac in range(1, bacteriaim.max() + 1): col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1) bw0 = bacteriaim == lab_bac if not np.any(bw0): continue plt.contour(bw0, levels=[0.5], colors=[col]) pos0 = bw0.nonzero() if len(pos0[0]) == 0 or len(pos0[1]) == 0: continue #lab_string = label_dict_string[lab_bac] pos = (np.min(pos0[0]), np.max(pos0[1])) plt.text(pos[1], pos[0], str(bacteria_lineage[lab_bac]), color=col) plt.savefig(os.path.join( output_dir, "tracking_frame_{:06d}".format(tpoint))) plt.close()
fetch_from_follower_test.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
from collections import defaultdict

from ducktape.mark import matrix
from ducktape.mark.resource import cluster

from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService, quorum
from kafkatest.services.monitor.jmx import JmxTool
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int


class FetchFromFollowerTest(ProduceConsumeValidateTest):

    RACK_AWARE_REPLICA_SELECTOR = "org.apache.kafka.common.replica.RackAwareReplicaSelector"
    METADATA_MAX_AGE_MS = 3000

    def __init__(self, test_context):
        super(FetchFromFollowerTest, self).__init__(test_context=test_context)
        self.jmx_tool = JmxTool(test_context, jmx_poll_ms=100)
        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1) if quorum.for_test(test_context) == quorum.zk else None
        self.kafka = KafkaService(test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  topics={
                                      self.topic: {
                                          "partitions": 1,
                                          "replication-factor": 3,
                                          "configs": {"min.insync.replicas": 1}},
                                  },
                                  server_prop_overrides=[
                                      ["replica.selector.class", self.RACK_AWARE_REPLICA_SELECTOR]
                                  ],
                                  per_node_server_prop_overrides={
                                      1: [("broker.rack", "rack-a")],
                                      2: [("broker.rack", "rack-b")],
                                      3: [("broker.rack", "rack-c")]
                                  },
                                  controller_num_nodes_override=1)

        self.producer_throughput = 1000
        self.num_producers = 1
        self.num_consumers = 1

    def min_cluster_size(self):
def setUp(self): if self.zk: self.zk.start() self.kafka.start() @cluster(num_nodes=9) @matrix(metadata_quorum=quorum.all_non_upgrade) def test_consumer_preferred_read_replica(self, metadata_quorum=quorum.zk): """ This test starts up brokers with "broker.rack" and "replica.selector.class" configurations set. The replica selector is set to the rack-aware implementation. One of the brokers has a different rack than the other two. We then use a console consumer with the "client.rack" set to the same value as the differing broker. After producing some records, we verify that the client has been informed of the preferred replica and that all the records are properly consumed. """ # Find the leader, configure consumer to be on a different rack leader_node = self.kafka.leader(self.topic, 0) leader_idx = self.kafka.idx(leader_node) non_leader_idx = 2 if leader_idx != 2 else 1 non_leader_rack = "rack-b" if leader_idx != 2 else "rack-a" self.logger.debug("Leader %d %s" % (leader_idx, leader_node)) self.logger.debug("Non-Leader %d %s" % (non_leader_idx, non_leader_rack)) self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka, self.topic, throughput=self.producer_throughput) self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic, client_id="console-consumer", group_id="test-consumer-group-1", consumer_timeout_ms=60000, message_validator=is_int, consumer_properties={"client.rack": non_leader_rack, "metadata.max.age.ms": self.METADATA_MAX_AGE_MS}) # Start up and let some data get produced self.start_producer_and_consumer() time.sleep(self.METADATA_MAX_AGE_MS * 2. / 1000) consumer_node = self.consumer.nodes[0] consumer_idx = self.consumer.idx(consumer_node) read_replica_attribute = "preferred-read-replica" read_replica_mbean = "kafka.consumer:type=consumer-fetch-manager-metrics,client-id=%s,topic=%s,partition=%d" % \ ("console-consumer", self.topic, 0) self.jmx_tool.jmx_object_names = [read_replica_mbean] self.jmx_tool.jmx_attributes = [read_replica_attribute] self.jmx_tool.start_jmx_tool(consumer_idx, consumer_node) # Wait for at least one interval of "metadata.max.age.ms" time.sleep(self.METADATA_MAX_AGE_MS * 2. / 1000) # Read the JMX output self.jmx_tool.read_jmx_output(consumer_idx, consumer_node) all_captured_preferred_read_replicas = defaultdict(int) self.logger.debug(self.jmx_tool.jmx_stats) for ts, data in self.jmx_tool.jmx_stats[0].items(): for k, v in data.items(): if k.endswith(read_replica_attribute): all_captured_preferred_read_replicas[int(v)] += 1 self.logger.debug("Saw the following preferred read replicas %s", dict(all_captured_preferred_read_replicas.items())) assert all_captured_preferred_read_replicas[non_leader_idx] > 0, \ "Expected to see broker %d (%s) as a preferred replica" % (non_leader_idx, non_leader_rack) # Validate consumed messages self.stop_producer_and_consumer() self.validate()
return super(FetchFromFollowerTest, self).min_cluster_size() + self.num_producers * 2 + self.num_consumers * 2
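# A condensed view of the two client-side settings the test above relies on,
# written out as the plain dict that ConsoleConsumer receives. The rack value
# is whichever rack differs from the leader's ("rack-a" or "rack-b" in this
# test); 3000 is METADATA_MAX_AGE_MS from the class. This is a summary of the
# test's own configuration, not Kafka documentation.
consumer_properties = {
    "client.rack": "rack-b",        # must match the broker.rack of a follower
    "metadata.max.age.ms": 3000,    # short refresh so the preferred read replica is discovered quickly
}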
SidebarSimple.tsx
/* GENERATED FILE */ import * as React from "react"; import Svg, { Rect, Path } from "react-native-svg"; import { IconProps } from '../lib' function
(props: IconProps) { return <Svg viewBox="0 0 256 256" width={props.size} height={props.size} fill={props.color} {...props}><Rect width={256} height={256} fill="none" /><Path d="M216,40H40A16,16,0,0,0,24,56V200a16,16,0,0,0,16,16H216a16,16,0,0,0,16-16V56A16,16,0,0,0,216,40Zm0,160H88V56H216V200Z" /></Svg>; } export default SidebarSimple;
SidebarSimple
utils.py
# LICENSE file in the root directory of this source tree. # Convert data to textflint format and run transform functions in textflint import glob import json import os from textflint import Engine CONFIG_PATH = "textflint_utils/configs" TRANSFORM_FIELDS = { "nli": {"context": "premise", "hypothesis": "hypothesis"}, "sentiment": {"statement": "x"}, "hs": {"statement": "x"}, "qa": {"context": "context", "question": "question"}, } LABEL_FIELD = {"nli": "label", "sentiment": "label", "hs": "label", "qa": "answer"} LABEL_MAP = { "nli": { "neutral": "neutral", "contradictory": "contradiction", "entailed": "entailment", }, "sentiment": {"positive": "positive", "negative": "negative", "neutral": "neutral"}, "hs": {"hateful": "hateful", "not-hateful": "not-hateful"}, } def findall(p, s): # Yields all the positions of the pattern p in the string s. i = s.find(p) while i != -1: yield i i = s.find(p, i + 1) # This converts a dynabench dataset to textflint format def reformat_data_to_textflint(samples, task): converted_samples = [] perturb_fields = TRANSFORM_FIELDS.get(task, None) label_map = LABEL_MAP.get(task, None) for i in range(len(samples)): sample = samples[i] converted = {"sample_id": i + 1} if task == "qa": answer = sample["answer"] if type(answer) is list: answers = set(answer) else: answers = [answer] converted["answers"] = [] for answer in answers: converted["answers"] += [ {"text": answer, "answer_start": i} for i in findall(answer, sample["context"]) ] converted["title"] = "" converted["is_impossible"] = False else: converted["y"] = label_map[sample["label"]] for key, value in perturb_fields.items(): converted[value] = sample[key] converted_samples.append(converted) return converted_samples def load_config(config_path): config = None with open(config_path) as f: config = json.loads(f.read()) return config def get_orig_value(data, sample, field): return data[sample["sample_id"]][field] def get_transformed_data(config_path, data, task): config = load_config(config_path) out_dir = config["out_dir"] out_files = os.listdir(out_dir) trans_samples = [] perturb_fields = TRANSFORM_FIELDS.get(task, None) label_field = LABEL_FIELD.get(task, None) for fname in out_files: if fname.startswith("ori"): continue fname = os.path.join(out_dir, fname) parts = fname.split("_") new_suffix = "_".join(parts[1:-1]) with open(fname) as f: for line in f: sample = json.loads(line) trans_sample = {"input_id": get_orig_value(data, sample, "uid")} trans_sample[label_field] = get_orig_value(data, sample, label_field) for key, value in perturb_fields.items(): trans_sample[key] = sample[value] # create a unique uid for new examples trans_sample["uid"] = str(trans_sample["input_id"]) + "_" + new_suffix trans_samples.append(trans_sample) return trans_samples def run_textflint(data, task): textflint_data = reformat_data_to_textflint(data, task) engine = Engine() config_file = os.path.join(CONFIG_PATH, task + "_config.json") config = load_config(config_file) out_dir = config["out_dir"] files = glob.glob(out_dir + "/*") for f in files: os.remove(f) engine.run(textflint_data, config_file) perturbed_data = get_transformed_data(config_file, data, task) return perturbed_data
# Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the
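# A worked example of reformat_data_to_textflint for the "nli" task, following
# TRANSFORM_FIELDS and LABEL_MAP above; the sample text itself is invented.
sample = {"context": "A man plays guitar.",
          "hypothesis": "Music is being played.",
          "label": "entailed"}
converted = reformat_data_to_textflint([sample], "nli")[0]
# converted == {"sample_id": 1, "y": "entailment",
#               "premise": "A man plays guitar.",
#               "hypothesis": "Music is being played."}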
example_sparsifier_graph.py
#!/usr/bin/python3 import tensorflow as tf import tfgraph def main(): with tf.Session() as sess: g: tfgraph.Graph = tfgraph.GraphConstructor.unweighted_random(sess, "G", 10, 85) g_sparse: tfgraph.Graph = tfgraph.GraphConstructor.as_sparsifier(sess, g, 0.75) print(g) print(g.m) print(g_sparse) print(g_sparse.m) print(g_sparse.m / g.m)
if __name__ == '__main__': main()
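# Back-of-envelope check of the final print above: with m = 85 edges in G and
# a sparsifier ratio of 0.75, roughly 85 * 0.75 ≈ 64 edges should survive, so
# g_sparse.m / g.m should land near 0.75. This assumes the 0.75 argument of
# as_sparsifier is an edge-keep fraction, which the ratio print suggests but
# the snippet itself does not state.
print(85 * 0.75)  # 63.75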
examples.map.builder.js
/* Name: Maps / Map Builder - Examples Written by: Okler Themes - (http://www.okler.net) Theme Version: 1.5.4 */ (function($) { 'use strict'; var $window = $(window); /* Fix Map size on Mobile */ function
() { fixMapSize(); $(window).on('load resize orientationchange', function() { fixMapSize(); }); } function fixMapSize() { if ( $window.width() <= 767 ) { var windowHeight = $(window).height(), offsetTop = $('#gmap').offset().top, contentPadding = parseInt($('.content-body').css('padding-bottom'), 10); $('#gmap').height( windowHeight - offsetTop - contentPadding ); } } // auto initialize $(function() { fixMapListener(); }); }).apply(this, [jQuery]);
fixMapListener
amp-access-server-jwt.js
/** * Copyright 2016 The AMP HTML Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS-IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import {AccessClientAdapter} from './amp-access-client'; import {JwtHelper} from './jwt'; import {Services} from '#service'; import { assertHttpsUrl, isProxyOrigin, removeFragment, serializeQueryString, } from '../../../src/url'; import {dev, user, userAssert} from '../../../src/log'; import {dict} from '#core/types/object'; import {escapeCssSelectorIdent} from '#core/dom/css-selectors'; import {fetchDocument} from '../../../src/document-fetcher'; import {getMode} from '../../../src/mode'; import {isArray} from '#core/types'; import {isExperimentOn} from '#experiments'; /** @const {string} */ const TAG = 'amp-access-server-jwt'; /** @const {number} */ const AUTHORIZATION_TIMEOUT = 3000; /** @const {string} */ const AMP_AUD = 'ampproject.org'; /** * This class implements server-side authorization protocol with JWT. In this * approach only immediately visible sections are downloaded. For authorization, * the client calls the authorization endpoint, which returns a signed JWT * token with `amp_authdata` field. The client calls CDN with this JWT token, * and CDN returns back the authorized content fragments. * * The approximate diagram looks like this: * * Initial GET * || * || [Limited document: fragments requiring * || authorization are excluded] * || * \/ * Authorize request to Publisher * || * || [Authorization response as JWT] * || * \/ * Authorize request to CDN w/JWT * || * || [Authorized fragments] * || * \/ * Merge authorized fragments * || * || * \/ * Apply authorization response * * @implements {./amp-access-source.AccessTypeAdapterDef} */ export class AccessServerJwtAdapter { /** * @param {!../../../src/service/ampdoc-impl.AmpDoc} ampdoc * @param {!JsonObject} configJson * @param {!./amp-access-source.AccessTypeAdapterContextDef} context */ constructor(ampdoc, configJson, context) { /** @const */ this.ampdoc = ampdoc; /** @const @private {!./amp-access-source.AccessTypeAdapterContextDef} */ this.context_ = context; /** @private @const */ this.clientAdapter_ = new AccessClientAdapter(ampdoc, configJson, context); /** @const @private {!../../../src/service/xhr-impl.Xhr} */ this.xhr_ = Services.xhrFor(ampdoc.win); /** @const @private {!../../../src/service/timer-impl.Timer} */ this.timer_ = Services.timerFor(ampdoc.win); /** @const @private {!../../../src/service/vsync-impl.Vsync} */ this.vsync_ = Services.vsyncFor(ampdoc.win); /** @private @const {?string} */ this.serverState_ = ampdoc.getMetaByName('i-amphtml-access-state'); const isInExperiment = isExperimentOn(ampdoc.win, 'amp-access-server-jwt'); /** @private @const {boolean} */ this.isProxyOrigin_ = isProxyOrigin(ampdoc.win.location) || isInExperiment; const serviceUrlOverride = isInExperiment ?
ampdoc.getParam('serverAccessService') : null; /** @private @const {string} */ this.serviceUrl_ = serviceUrlOverride || removeFragment(ampdoc.win.location.href); /** @const @private {?string} */ this.key_ = configJson['publicKey'] || null; /** @const @private {?string} */ this.keyUrl_ = configJson['publicKeyUrl'] || null; userAssert( this.key_ || this.keyUrl_, '"publicKey" or "publicKeyUrl" must be specified' ); if (this.keyUrl_) { assertHttpsUrl(this.keyUrl_, '"publicKeyUrl"'); } if (this.key_ && this.keyUrl_) { // TODO(dvoytenko): Remove "publicKey" option eventually. user().warn( TAG, 'Both "publicKey" and "publicKeyUrl" specified. ' + 'The "publicKeyUrl" will be ignored.' ); } /** @private @const {!JwtHelper} */ this.jwtHelper_ = new JwtHelper(ampdoc.win); } /** @override */ getConfig() { return { 'client': this.clientAdapter_.getConfig(), 'proxy': this.isProxyOrigin_, 'serverState': this.serverState_, 'publicKey': this.key_, 'publicKeyUrl': this.keyUrl_, }; } /** @override */ isAuthorizationEnabled() { return true; } /** @override */ authorize() { dev().fine( TAG, 'Start authorization with ', this.isProxyOrigin_ ? 'proxy' : 'non-proxy', this.serverState_, this.clientAdapter_.getAuthorizationUrl() ); if (!this.isProxyOrigin_ || !this.serverState_) { return this.authorizeOnClient_(); } return this.authorizeOnServer_(); } /** @override */ isPingbackEnabled() { return this.clientAdapter_.isPingbackEnabled(); } /** @override */ pingback() { return this.clientAdapter_.pingback(); } /** @override */ postAction() { // Nothing to do. } /** * @return {!Promise<{encoded:string, jwt:!JsonObject}>} * @private */ fetchJwt_() { const urlPromise = this.context_.buildUrl( this.clientAdapter_.getAuthorizationUrl(), /* useAuthData */ false ); let jwtPromise = urlPromise .then((url) => { dev().fine(TAG, 'Authorization URL: ', url); return this.timer_.timeoutPromise( AUTHORIZATION_TIMEOUT, this.xhr_.fetchText(url, { credentials: 'include', }) ); }) .then((resp) => { return resp.text(); }) .then((encoded) => { const jwt = this.jwtHelper_.decode(encoded); userAssert( jwt['amp_authdata'], '"amp_authdata" must be present in JWT' ); return {encoded, jwt}; }); if (this.shouldBeValidated_()) { // Validate JWT in the development mode. if (this.jwtHelper_.isVerificationSupported()) { jwtPromise = jwtPromise.then((resp) => { return this.jwtHelper_ .decodeAndVerify(resp.encoded, this.loadKeyPem_()) .then(() => resp); }); } else { user().warn( TAG, 'Cannot verify signature on this browser since' + " it doesn't support WebCrypto APIs" ); } jwtPromise = jwtPromise.then((resp) => { this.validateJwt_(resp.jwt); return resp; }); } return jwtPromise.catch((reason) => { throw user().createError('JWT fetch or validation failed: ', reason); }); } /** * @return {!Promise<string>} * @private */ loadKeyPem_() { if (this.key_) { return Promise.resolve(this.key_); } return this.xhr_ .fetchText(dev().assertString(this.keyUrl_)) .then((res) => res.text()); } /** * @return {boolean} * @private */ shouldBeValidated_() { return getMode().development; } /** * @param {!JsonObject} jwt * @private */ validateJwt_(jwt) { const now = Date.now(); // exp: expiration time. const exp = jwt['exp']; userAssert(exp, '"exp" field must be specified'); userAssert(parseFloat(exp) * 1000 > now, 'token has expired: %s', exp); // aud: audience. 
const aud = jwt['aud']; userAssert(aud, '"aud" field must be specified'); let audForAmp = false; if (isArray(aud)) { for (let i = 0; i < aud.length; i++) { if (aud[i] == AMP_AUD) { audForAmp = true; break; } } } else { audForAmp = aud == AMP_AUD; } userAssert(audForAmp, '"aud" must be "%s": %s', AMP_AUD, aud); } /** * @return {!Promise<!JsonObject>} * @private */ authorizeOnClient_() { dev().fine( TAG, 'Proceed via client protocol via ', this.clientAdapter_.getAuthorizationUrl() ); return this.fetchJwt_().then((resp) => { return resp.jwt['amp_authdata']; }); } /** * @return {!Promise<!JsonObject>} * @private */ authorizeOnServer_() { dev().fine(TAG, 'Proceed via server protocol'); return this.fetchJwt_().then((resp) => { const {encoded, jwt} = resp; const accessData = jwt['amp_authdata']; const request = serializeQueryString( dict({ 'url': removeFragment(this.ampdoc.win.location.href), 'state': this.serverState_, 'jwt': encoded, }) ); dev().fine(TAG, 'Authorization request: ', this.serviceUrl_, request);
// CORS preflight request. return this.timer_ .timeoutPromise( AUTHORIZATION_TIMEOUT, fetchDocument(this.ampdoc.win, this.serviceUrl_, { method: 'POST', body: request, headers: dict({ 'Content-Type': 'application/x-www-form-urlencoded', }), }) ) .then((response) => { dev().fine(TAG, 'Authorization response: ', response); return this.replaceSections_(response); }) .then(() => accessData); }); } /** * @param {!Document} doc * @return {!Promise} */ replaceSections_(doc) { const sections = doc.querySelectorAll('[i-amphtml-access-id]'); dev().fine(TAG, '- access sections: ', sections); return this.vsync_.mutatePromise(() => { for (let i = 0; i < sections.length; i++) { const section = sections[i]; const sectionId = section.getAttribute('i-amphtml-access-id'); const target = this.ampdoc .getRootNode() .querySelector( `[i-amphtml-access-id="${escapeCssSelectorIdent(sectionId)}"]` ); if (!target) { dev().warn(TAG, 'Section not found: ', sectionId); continue; } target.parentElement.replaceChild( this.ampdoc.win.document.importNode(section, /* deep */ true), target ); } }); } }
dev().fine(TAG, '- access data: ', accessData); // Note that `application/x-www-form-urlencoded` is used to avoid
grpc_server.rs
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::sync::Arc; use common_base::tokio; use common_base::tokio::sync::oneshot; use common_base::tokio::sync::oneshot::Receiver; use common_base::tokio::sync::oneshot::Sender; use common_base::tokio::task::JoinHandle; use common_base::Stoppable; use common_exception::ErrorCode; use common_meta_types::protobuf::meta_server::MetaServer; use common_tracing::tracing; use common_tracing::tracing::Instrument; use futures::future::Either; use tonic::transport::Identity; use tonic::transport::Server; use tonic::transport::ServerTlsConfig; use crate::api::grpc::grpc_service::MetaGrpcImpl; use crate::configs::Config; use crate::meta_service::MetaNode; pub struct GrpcServer { conf: Config, meta_node: Arc<MetaNode>, join_handle: Option<JoinHandle<()>>, stop_tx: Option<Sender<()>>, fin_rx: Option<Receiver<()>>, } impl GrpcServer { pub fn create(conf: Config, meta_node: Arc<MetaNode>) -> Self { Self { conf, meta_node, join_handle: None, stop_tx: None, fin_rx: None, } } pub fn get_meta_node(&self) -> Arc<MetaNode> { self.meta_node.clone() } async fn do_start(&mut self) -> common_exception::Result<()> { let conf = self.conf.clone(); let meta_node = self.meta_node.clone(); // For sending a signal when the server has started. let (started_tx, started_rx) = oneshot::channel::<()>(); // For receiving the stop signal. let (stop_tx, stop_rx) = oneshot::channel::<()>(); // For sending a signal when the server has finished shutting down. let (fin_tx, fin_rx) = oneshot::channel::<()>(); let builder = Server::builder(); let tls_conf = Self::tls_config(&self.conf).await.map_err(|e| { ErrorCode::TLSConfigurationFailure(format!("failed to build ServerTlsConfig {}", e)) })?; let mut builder = if let Some(tls_conf) = tls_conf { tracing::info!("gRPC TLS enabled"); builder.tls_config(tls_conf).map_err(|e| { ErrorCode::TLSConfigurationFailure(format!("gRPC server tls_config failure {}", e)) })? } else { builder }; let addr = conf.grpc_api_address.parse::<std::net::SocketAddr>()?; tracing::info!("gRPC addr: {}", addr); let grpc_impl = MetaGrpcImpl::create(meta_node.clone()); let grpc_srv = MetaServer::new(grpc_impl); let j = tokio::spawn( async move { let res = builder .add_service(grpc_srv) .serve_with_shutdown(addr, async move { let _ = started_tx.send(()); tracing::info!("metasrv starts to wait for stop signal: {}", addr); let _ = stop_rx.await; tracing::info!("metasrv receives stop signal: {}", addr); }) .await; // gRPC server quit. Starting to shut down the meta node. let _ = meta_node.stop().await; let send_fin_res = fin_tx.send(()); tracing::info!( "metasrv sending signal of finishing shutdown {}, res: {:?}", addr, send_fin_res ); tracing::info!("metasrv returned res: {:?}", res); } .instrument(tracing::debug_span!("spawn-grpc")), ); started_rx .await .map_err(|e| ErrorCode::MetaServiceError(e.to_string()))?; self.join_handle = Some(j); self.stop_tx = Some(stop_tx); self.fin_rx = Some(fin_rx); Ok(()) } async fn
( &mut self, force: Option<tokio::sync::broadcast::Receiver<()>>, ) -> common_exception::Result<()> { if let Some(tx) = self.stop_tx.take() { let _ = tx.send(()); } if let Some(j) = self.join_handle.take() { if let Some(mut f) = force { let f = Box::pin(f.recv()); let j = Box::pin(j); match futures::future::select(f, j).await { Either::Left((_x, j)) => { tracing::info!("received force shutdown signal"); j.abort(); } Either::Right(_) => { tracing::info!("Done: graceful shutdown"); } } } else { tracing::info!("no force signal, block waiting for join handle for ever"); j.await.map_err(|e| { ErrorCode::MetaServiceError(format!("metasrv join handle error: {}", e)) })?; tracing::info!("Done: waiting for join handle for ever"); } } if let Some(rx) = self.fin_rx.take() { tracing::info!("block waiting for fin_rx"); rx.await.map_err(|e| { ErrorCode::MetaServiceError(format!("metasrv fin_rx recv error: {}", e)) })?; tracing::info!("Done: block waiting for fin_rx"); } Ok(()) } async fn tls_config(conf: &Config) -> anyhow::Result<Option<ServerTlsConfig>> { if conf.tls_rpc_server_enabled() { let cert = tokio::fs::read(conf.grpc_tls_server_cert.as_str()).await?; let key = tokio::fs::read(conf.grpc_tls_server_key.as_str()).await?; let server_identity = Identity::from_pem(cert, key); let tls = ServerTlsConfig::new().identity(server_identity); Ok(Some(tls)) } else { Ok(None) } } } #[tonic::async_trait] impl Stoppable for GrpcServer { async fn start(&mut self) -> common_exception::Result<()> { tracing::info!("GrpcServer::start"); self.do_start().await?; tracing::info!("Done GrpcServer::start"); Ok(()) } async fn stop( &mut self, force: Option<tokio::sync::broadcast::Receiver<()>>, ) -> common_exception::Result<()> { tracing::info!("GrpcServer::stop"); self.do_stop(force).await?; tracing::info!("Done GrpcServer::stop"); Ok(()) } }
do_stop
lucky.py
def next_lucky(n): if len(n) == 0: return '' if int(n[0]) < 3: return '3'*len(n) elif int(n[0]) == 3: pos1 = int(n[0]+next_lucky(n[1:])) pos2 = int('5'+'3'*(len(n)-1)) if pos1 > pos2: pos1,pos2 = pos2,pos1 if pos1 > int(n): return str(pos1) else: return str(pos2) elif int(n[0]) < 5: return '5'+'3'*(len(n)-1) elif int(n[0]) == 5: pos1 = int(n[0]+next_lucky(n[1:])) pos2 = int('3'*(len(n)+1)) if pos1 > pos2: pos1,pos2 = pos2,pos1 if pos1 > int(n): return str(pos1) else: return str(pos2) elif int(n[0]) > 5: return '3'*(len(n)+1) if __name__ == "__main__": assert next_lucky('33')=='35' assert next_lucky('35')=='53' assert next_lucky('200')=='333' assert next_lucky('5555')=='33333' for testcase in xrange(int(raw_input())): print next_lucky(raw_input()) #----------original attempt---------- # brute force, too slow """ import re
nlen = len(nstr) while re.search(r'[^35]', nstr): n+=1 nstr=str(n) return n if __name__ == "__main__": for testcase in xrange(int(raw_input())): print next_lucky(int(raw_input())) """
def next_lucky(n): n+=1 nstr = str(n)
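# Two extra sanity checks added here for illustration, consistent with the
# asserts above. For '35' the first digit is 3, so the candidates are
# int('3' + next_lucky('5')) = 333 and int('5' + '3') = 53; the smaller
# candidate greater than 35 wins. A first digit above 5 always rolls over to
# one more digit of threes.
assert next_lucky('35') == '53'
assert next_lucky('99') == '333'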
cifar_cnn_three_conv.py
import tensorflow as tf import keras from keras.datasets import cifar10 import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import os import sys import csv import utils_csv import utils_tf as utils from cleverhans.utils_tf import model_train, model_eval from cleverhans.attacks import FastGradientMethod from cleverhans.model import Model print("Tensorflow version " + tf.__version__) config_num = int(sys.argv[1]) if len(sys.argv) > 1 else 1 # Choose type of learning technique according to config_dict config_dict = {0: "backprop", 1: "biprop", 2: "halfbiprop", 3: "nobias_backprop", 4: "nobias_biprop", 5: "nobias_halfbiprop"} num_classes = 10 model_name = sys.argv[0].replace(".py", "") + "_" + config_dict[config_num] print("Model name: " + model_name) # load data # https://github.com/BIGBALLON/cifar-10-cnn/blob/master/1_Lecun_Network/LeNet_keras.py (x_train, y_train), (x_test, y_test) = cifar10.load_data() y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 # for reproducibility np.random.seed(0) tf.set_random_seed(0) sess = tf.InteractiveSession() # three convolutional layers with their channel counts, and a # fully connected layer (the last layer has 10 softmax neurons) K = 4 # first convolutional layer output depth L = 8 # second convolutional layer output depth M = 12 # third convolutional layer output depth N = 200 # fully connected layer with tf.name_scope("input"): # input X & output GX_: 32x32 RGB images, the first dimension (None) will index the images in the mini-batch X = tf.placeholder(tf.float32, [None, 32, 32, 3]) X_noisy = tf.placeholder(tf.float32, [None, 32, 32, 3]) X_adv = tf.placeholder(tf.float32, [None, 32, 32, 3]) GX_ = tf.placeholder(tf.float32, [None, 32, 32, 3]) # output Y_ & input GY: labels for classification and generation Y_ = tf.placeholder(tf.float32, [None, num_classes]) GY = tf.placeholder(tf.float32, [None, num_classes]) # variable learning rate lr = tf.placeholder(tf.float32) # variable batch size BS = tf.placeholder(tf.int32) input_test_sum = tf.summary.image("input", X, num_classes) input_noisy_sum = tf.summary.image("input-noisy", X_noisy, num_classes) input_adv_sum = tf.summary.image("input-adv", X_adv, num_classes) with tf.name_scope("classifier-generator"): C_W1 = utils.weight_variable([5, 5, 3, K], stddev=0.1, name="C_W1") C_W2 = utils.weight_variable([5, 5, K, L], stddev=0.1, name="C_W2") C_W3 = utils.weight_variable([4, 4, L, M], stddev=0.1, name="C_W3") C_W4 = utils.weight_variable([8 * 8 * M, N], stddev=0.1, name="C_W4") C_W5 = utils.weight_variable([N, num_classes], stddev=0.1, name="C_W5") def classifier(x, reuse=None): with tf.variable_scope("classifier", reuse=reuse) as scope_c: # Variables for classifier C_B1 = utils.bias_variable([K], name="C_B1") C_B2 = utils.bias_variable([L], name="C_B2") C_B3 = utils.bias_variable([M], name="C_B3") C_B4 = utils.bias_variable([N], name="C_B4") C_B5 = utils.bias_variable([num_classes], name="C_B5") stride = 1 # output is 32x32 H1 = tf.nn.relu(tf.nn.conv2d(x, C_W1, strides=[1, stride, stride, 1], padding='SAME') + C_B1) stride = 2 # output is 16x16 H2 = tf.nn.relu(tf.nn.conv2d(H1, C_W2, strides=[1, stride, stride, 1], padding='SAME') + C_B2) stride = 2 # output is 8x8 H3 = tf.nn.relu(tf.nn.conv2d(H2, C_W3, strides=[1, stride, stride, 1], padding='SAME') + C_B3) # reshape the output from the third convolution for the
fully connected layer HH3 = tf.reshape(H3, shape=[-1, 8 * 8 * M]) H4 = tf.nn.relu(tf.matmul(HH3, C_W4) + C_B4) Ylogits = tf.matmul(H4, C_W5) + C_B5 Ysigmoid = tf.nn.sigmoid(Ylogits) Ysoftmax = tf.nn.softmax(Ylogits) return Ysoftmax, Ysigmoid, Ylogits class ClassifierModel(Model): def get_logits(self, x): Ysoftmax, Ysigmoid, Ylogits = classifier(x, reuse=True) return Ylogits # Generator of random input reuses weights of classifier def generator(y, bs, reuse=None):
def plot_generator(samples): if num_classes == 10: fig = plt.figure(figsize=(5, 2)) gs = gridspec.GridSpec(2, 5) else: fig = plt.figure(figsize=(10, 10)) gs = gridspec.GridSpec(10, 10) gs.update(wspace=0.05, hspace=0.05) for i, sample in enumerate(samples): ax = plt.subplot(gs[i]) plt.axis('off') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_aspect('equal') plt.imshow(sample.reshape((32,32,3))) return fig GXsigmoid, GXlogits = generator(GY, BS) GXsigmoid_test, GXlogits_test = generator(GY, BS, reuse=True) Ysoftmax, Ysigmoid, Ylogits = classifier(X) model_classifier = ClassifierModel() Ysoftmax_noisy, Ysigmoid_noisy, Ylogits_noisy = classifier(X_noisy, reuse=True) Ysoftmax_adv, Ysigmoid_adv, Ylogits_adv = classifier(X_adv, reuse=True) with tf.name_scope("loss"): c_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)) g_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=GXlogits, labels=GX_)) """ Summary """ g_loss_sum = tf.summary.scalar("g_loss", g_loss) c_loss_sum = tf.summary.scalar("c_loss", c_loss) # accuracy of the trained model, between 0 (worst) and 1 (best) with tf.name_scope("accuracy"): with tf.name_scope("correct_prediction"): correct_prediction = tf.equal(tf.argmax(Ysoftmax, 1), tf.argmax(Y_, 1)) with tf.name_scope("accuracy"): accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) with tf.name_scope("correct_prediction_noisy"): correct_prediction_noisy = tf.equal(tf.argmax(Ysoftmax_noisy, 1), tf.argmax(Y_, 1)) with tf.name_scope("accuracy_noisy"): accuracy_noisy = tf.reduce_mean(tf.cast(correct_prediction_noisy, tf.float32)) with tf.name_scope("correct_prediction_adv"): correct_prediction_adv = tf.equal(tf.argmax(Ysoftmax_adv, 1), tf.argmax(Y_, 1)) with tf.name_scope("accuracy_adv"): accuracy_adv = tf.reduce_mean(tf.cast(correct_prediction_adv, tf.float32)) """ Summary """ accuracy_sum = tf.summary.scalar("accuracy", accuracy) accuracy_noisy_sum = tf.summary.scalar("accuracy_noisy", accuracy_noisy) accuracy_adv_sum = tf.summary.scalar("accuracy_adv", accuracy_adv) with tf.name_scope("max_output"): with tf.name_scope("max_output_test"): max_output_sigmoid_test = tf.reduce_max(Ysigmoid) max_output_softmax_test = tf.reduce_max(Ysoftmax) with tf.name_scope("max_output_noise"): max_output_sigmoid_noise = tf.reduce_max(Ysigmoid_noisy) max_output_softmax_noise = tf.reduce_max(Ysoftmax_noisy) with tf.name_scope("max_output_adv"): max_output_sigmoid_adv = tf.reduce_max(Ysigmoid_adv) max_output_softmax_adv = tf.reduce_max(Ysoftmax_adv) """ Summary """ max_output_sigmoid_test_sum = tf.summary.scalar("max_output_sigmoid_test", max_output_sigmoid_test) max_output_softmax_test_sum = tf.summary.scalar("max_output_softmax_test", max_output_softmax_test) max_output_sigmoid_noise_sum = tf.summary.scalar("max_output_sigmoid_noise", max_output_sigmoid_noise) max_output_softmax_noise_sum = tf.summary.scalar("max_output_softmax_noise", max_output_softmax_noise) max_output_sigmoid_adv_sum = tf.summary.scalar("max_output_sigmoid_adv", max_output_sigmoid_adv) max_output_softmax_adv_sum = tf.summary.scalar("max_output_softmax_adv", max_output_softmax_adv) utils.show_all_variables() t_vars = tf.trainable_variables() c_vars = [var for var in t_vars if 'C_' in var.name]\ if config_num < 3 else [var for var in t_vars if 'C_W' in var.name] g_vars = [var for var in t_vars if 'C_W' in var.name or 'G_' in var.name]\ if config_num < 3 else c_vars # training step learning_rate_dis = lr learning_rate_gen = lr with tf.name_scope("train"): 
c_train = tf.train.AdamOptimizer(learning_rate_dis).minimize(c_loss, var_list=c_vars) g_train = tf.train.AdamOptimizer(learning_rate_gen).minimize(g_loss, var_list=g_vars) # final summary operations g_sum = tf.summary.merge([g_loss_sum]) c_sum = tf.summary.merge([input_test_sum, accuracy_sum, c_loss_sum, max_output_sigmoid_test_sum, max_output_softmax_test_sum]) noise_sum = tf.summary.merge([max_output_sigmoid_noise_sum, max_output_softmax_noise_sum]) noisy_sum = tf.summary.merge([input_noisy_sum, accuracy_noisy_sum]) adv_sum = tf.summary.merge([input_adv_sum, accuracy_adv_sum, max_output_sigmoid_adv_sum, max_output_softmax_adv_sum]) folder_out = 'out/' + model_name + '/' if not os.path.exists(folder_out): os.makedirs(folder_out) folder_csv = 'csv/' + model_name + '/' if not os.path.exists(folder_csv): os.makedirs(folder_csv) folder_logs = 'logs/' + model_name if not os.path.exists(folder_logs): os.makedirs(folder_logs) writer = tf.summary.FileWriter(folder_logs, sess.graph) batch_size = 100 num_train_images = x_train.shape[0] num_batches = num_train_images // batch_size all_classes = np.eye(num_classes) counter = 0 fgsm_params = {'eps': 0.03, 'clip_min': 0., 'clip_max': 1.} random_noise = np.random.random_sample(x_test.shape) test_image_with_noise = np.clip(x_test + 0.1*random_noise, 0., 1.) accuracy_list = [] sigmoid_list = [] softmax_list = [] # initialize all variables tf.global_variables_initializer().run() for i in range(50001): if i % num_batches == 0: idx_train = np.arange(x_train.shape[0]) np.random.shuffle(idx_train) x_train, y_train = x_train[idx_train], y_train[idx_train] idx = i % num_batches batch_X = x_train[idx*batch_size:(idx+1)*batch_size] batch_Y = y_train[idx*batch_size:(idx+1)*batch_size] # learning rate decay max_learning_rate = 0.003 min_learning_rate = 0.0001 decay_speed = 2000.0 learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * np.exp(-i/decay_speed) if i % 500 == 0 or i == 50000: counter += 1 # Saves generated images samples = sess.run(GXsigmoid_test, feed_dict={GY: all_classes, BS: num_classes}) fig = plot_generator(samples) plt.savefig(folder_out+"gen_"+str(i).zfill(6)+'.png', bbox_inches='tight') plt.close(fig) attack_fgsm = FastGradientMethod(model_classifier, sess=sess) adv_x_np = attack_fgsm.generate_np(x_test, **fgsm_params) fig = plot_generator(adv_x_np[:num_classes]) plt.savefig(folder_out+"adv_"+str(i).zfill(6)+'.png', bbox_inches='tight') plt.close(fig) accu_test, c_loss_test, sigmoid_test, softmax_test, sum_c = sess.run([accuracy, c_loss, max_output_sigmoid_test, max_output_softmax_test, c_sum], {X: x_test, Y_: y_test}) writer.add_summary(sum_c, i) g_loss_test, sum_g = sess.run([g_loss, g_sum], {GY: batch_Y, GX_: batch_X, BS: batch_size}) writer.add_summary(sum_g, i) print(str(i) + ": epoch " + str(i*batch_size//x_train.shape[0]+1)\ + " - test loss class: " + str(c_loss_test) + " test loss gen: " + str(g_loss_test)) print("Real test images - Sigmoid: " + str(sigmoid_test) + "\tSoftmax: " + str(softmax_test) + "\taccuracy: "+ str(accu_test)) sigmoid_random, softmax_random, sum_random = sess.run([max_output_sigmoid_noise, max_output_softmax_noise, noise_sum], {X_noisy: random_noise}) writer.add_summary(sum_random, i) accu_random, sum_noisy = sess.run([accuracy_noisy, noisy_sum], {X_noisy: test_image_with_noise, Y_: y_test}) writer.add_summary(sum_noisy, i) print("Random noise images - Sigmoid: " + str(sigmoid_random) + "\tSoftmax: " + str(softmax_random) + "\taccuracy: "+ str(accu_random)) accu_adv, sigmoid_adv, softmax_adv,
sum_adv = sess.run([accuracy_adv, max_output_sigmoid_adv, max_output_softmax_adv, adv_sum], {X_adv: adv_x_np, Y_: y_test}) writer.add_summary(sum_adv, i) print("Adversarial examples - Sigmoid: " + str(sigmoid_adv) + "\tSoftmax: " + str(softmax_adv) + "\taccuracy: "+ str(accu_adv)) print() accuracy_list.append([i, accu_test, accu_random, accu_adv, counter]) sigmoid_list.append([i, sigmoid_test, sigmoid_random, sigmoid_adv, counter]) softmax_list.append([i, softmax_test, softmax_random, softmax_adv, counter]) sess.run(c_train, {X: batch_X, Y_: batch_Y, lr: learning_rate}) if config_num == 1 or (config_num == 2 and i < 25000) or\ config_num == 4 or (config_num == 5 and i < 25000): sess.run(g_train, {GY: batch_Y, GX_: batch_X, lr: learning_rate, BS: batch_size}) writer.close() # Save data in csv with open(folder_csv+"accuracy.csv", "w") as output: writer = csv.writer(output, lineterminator='\n') writer.writerows(accuracy_list) with open(folder_csv+"sigmoid.csv", "w") as output: writer = csv.writer(output, lineterminator='\n') writer.writerows(sigmoid_list) with open(folder_csv+"softmax.csv", "w") as output: writer = csv.writer(output, lineterminator='\n') writer.writerows(softmax_list) # Load data in csv accu_data = utils_csv.get_data_csv_file(folder_csv+"accuracy.csv") sigmoid_data = utils_csv.get_data_csv_file(folder_csv+"sigmoid.csv") softmax_data = utils_csv.get_data_csv_file(folder_csv+"softmax.csv") # Print best values utils_csv.print_best(accu_data, sigmoid_data, softmax_data, folder_csv+"summary.txt")
with tf.variable_scope("generator", reuse=reuse) as scope_g: # Variables for classifier G_B1 = utils.bias_variable([3], name="G_B1") G_B2 = utils.bias_variable([K], name="G_B2") G_B3 = utils.bias_variable([L], name="G_B3") G_B4 = utils.bias_variable([M*8*8], name="G_B4") G_B5 = utils.bias_variable([N], name="G_B5") GH4 = tf.nn.relu(tf.matmul(y, tf.transpose(C_W5)) + G_B5) GH3 = tf.nn.relu(tf.matmul(GH4, tf.transpose(C_W4)) + G_B4) GHH3 = tf.reshape(GH3, shape=[-1, 8, 8, M]) stride = 2 # output is 14x14 GH2 = tf.nn.relu(tf.nn.conv2d_transpose(GHH3, C_W3, output_shape=[bs, 16, 16, L], strides=[1, stride, stride, 1]) + G_B3) #deconv2 W3 stride = 2 # output is 28x28 GH1 = tf.nn.relu(tf.nn.conv2d_transpose(GH2, C_W2, output_shape=[bs, 32, 32, K], strides=[1, stride, stride, 1]) + G_B2)#deconv2 W2 stride = 1 # output is 28x28 GXlogits = tf.nn.conv2d_transpose(GH1, C_W1, output_shape=[bs, 32, 32, 3], strides=[1, stride, stride, 1]) + G_B1#deconv2 W1 GXsigmoid = tf.nn.sigmoid(GXlogits) return GXsigmoid, GXlogits
c7816.rs
#[doc = "Reader of register C7816"] pub type R = crate::R<u8, super::C7816>; #[doc = "Writer for register C7816"] pub type W = crate::W<u8, super::C7816>; #[doc = "Register C7816 `reset()`'s with value 0"] impl crate::ResetValue for super::C7816 { type Type = u8; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "ISO-7816 Functionality Enabled\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ISO_7816E_A { #[doc = "0: ISO-7816 functionality is turned off/not enabled."] _0 = 0, #[doc = "1: ISO-7816 functionality is turned on/enabled."] _1 = 1, } impl From<ISO_7816E_A> for bool { #[inline(always)] fn from(variant: ISO_7816E_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `ISO_7816E`"] pub type ISO_7816E_R = crate::R<bool, ISO_7816E_A>; impl ISO_7816E_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ISO_7816E_A { match self.bits { false => ISO_7816E_A::_0, true => ISO_7816E_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == ISO_7816E_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == ISO_7816E_A::_1 } } #[doc = "Write proxy for field `ISO_7816E`"] pub struct ISO_7816E_W<'a> { w: &'a mut W, } impl<'a> ISO_7816E_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ISO_7816E_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "ISO-7816 functionality is turned off/not enabled."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(ISO_7816E_A::_0) } #[doc = "ISO-7816 functionality is turned on/enabled."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(ISO_7816E_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u8) & 0x01); self.w } } #[doc = "Transfer Type\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TTYPE_A { #[doc = "0: T = 0 per the ISO-7816 specification."] _0 = 0, #[doc = "1: T = 1 per the ISO-7816 specification."] _1 = 1, } impl From<TTYPE_A> for bool { #[inline(always)] fn from(variant: TTYPE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `TTYPE`"] pub type TTYPE_R = crate::R<bool, TTYPE_A>; impl TTYPE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TTYPE_A { match self.bits { false => TTYPE_A::_0, true => TTYPE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == TTYPE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == TTYPE_A::_1 } } #[doc = "Write proxy for field `TTYPE`"] pub struct TTYPE_W<'a> { w: &'a mut W, } impl<'a> TTYPE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TTYPE_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "T = 0 per the ISO-7816 specification."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(TTYPE_A::_0) } #[doc = "T = 1 per the ISO-7816 specification."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(TTYPE_A::_1) } #[doc = r"Sets the field bit"] 
#[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u8) & 0x01) << 1); self.w } } #[doc = "Detect Initial Character\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum INIT_A { #[doc = "0: Normal operating mode. Receiver does not seek to identify initial character."] _0 = 0, #[doc = "1: Receiver searches for initial character."] _1 = 1, } impl From<INIT_A> for bool { #[inline(always)] fn from(variant: INIT_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `INIT`"] pub type INIT_R = crate::R<bool, INIT_A>; impl INIT_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> INIT_A { match self.bits { false => INIT_A::_0, true => INIT_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == INIT_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == INIT_A::_1 } } #[doc = "Write proxy for field `INIT`"] pub struct INIT_W<'a> { w: &'a mut W, } impl<'a> INIT_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: INIT_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Normal operating mode. Receiver does not seek to identify initial character."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(INIT_A::_0) } #[doc = "Receiver searches for initial character."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(INIT_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u8) & 0x01) << 2); self.w } } #[doc = "Generate NACK on Error\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ANACK_A { #[doc = "0: No NACK is automatically generated."] _0 = 0, #[doc = "1: A NACK is automatically generated if a parity error is detected or if an invalid initial character is detected."] _1 = 1, } impl From<ANACK_A> for bool { #[inline(always)] fn from(variant: ANACK_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `ANACK`"] pub type ANACK_R = crate::R<bool, ANACK_A>; impl ANACK_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ANACK_A { match self.bits { false => ANACK_A::_0, true => ANACK_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == ANACK_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == ANACK_A::_1 } } #[doc = "Write proxy for field `ANACK`"] pub struct ANACK_W<'a> { w: &'a mut W, } impl<'a> ANACK_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ANACK_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "No NACK is automatically generated."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(ANACK_A::_0) } #[doc = "A NACK is automatically generated if a parity error is detected or if an 
invalid initial character is detected."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(ANACK_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u8) & 0x01) << 3); self.w } } #[doc = "Generate NACK on Overflow\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ONACK_A { #[doc = "0: The received data does not generate a NACK when the receipt of the data results in an overflow event."] _0 = 0, #[doc = "1: If the receiver buffer overflows, a NACK is automatically sent on a received character."] _1 = 1, } impl From<ONACK_A> for bool { #[inline(always)] fn from(variant: ONACK_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `ONACK`"] pub type ONACK_R = crate::R<bool, ONACK_A>; impl ONACK_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ONACK_A { match self.bits { false => ONACK_A::_0, true => ONACK_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == ONACK_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == ONACK_A::_1 } } #[doc = "Write proxy for field `ONACK`"] pub struct ONACK_W<'a> { w: &'a mut W, } impl<'a> ONACK_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ONACK_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "The received data does not generate a NACK when the receipt of the data results in an overflow event."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(ONACK_A::_0) } #[doc = "If the receiver buffer overflows, a NACK is automatically sent on a received character."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(ONACK_A::_1) }
#[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u8) & 0x01) << 4); self.w } } impl R { #[doc = "Bit 0 - ISO-7816 Functionality Enabled"] #[inline(always)] pub fn iso_7816e(&self) -> ISO_7816E_R { ISO_7816E_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Transfer Type"] #[inline(always)] pub fn ttype(&self) -> TTYPE_R { TTYPE_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Detect Initial Character"] #[inline(always)] pub fn init(&self) -> INIT_R { INIT_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Generate NACK on Error"] #[inline(always)] pub fn anack(&self) -> ANACK_R { ANACK_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Generate NACK on Overflow"] #[inline(always)] pub fn onack(&self) -> ONACK_R { ONACK_R::new(((self.bits >> 4) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - ISO-7816 Functionality Enabled"] #[inline(always)] pub fn iso_7816e(&mut self) -> ISO_7816E_W { ISO_7816E_W { w: self } } #[doc = "Bit 1 - Transfer Type"] #[inline(always)] pub fn ttype(&mut self) -> TTYPE_W { TTYPE_W { w: self } } #[doc = "Bit 2 - Detect Initial Character"] #[inline(always)] pub fn init(&mut self) -> INIT_W { INIT_W { w: self } } #[doc = "Bit 3 - Generate NACK on Error"] #[inline(always)] pub fn anack(&mut self) -> ANACK_W { ANACK_W { w: self } } #[doc = "Bit 4 - Generate NACK on Overflow"] #[inline(always)] pub fn onack(&mut self) -> ONACK_W { ONACK_W { w: self } } }
post_fleets_fleet_id_wings_wing_id_squads_not_found_easyjson.go
// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. package esi import ( json "encoding/json" easyjson "github.com/mailru/easyjson" jlexer "github.com/mailru/easyjson/jlexer" jwriter "github.com/mailru/easyjson/jwriter" ) // suppress unused package warning var ( _ *json.RawMessage _ *jlexer.Lexer _ *jwriter.Writer _ easyjson.Marshaler ) func easyjson10365261DecodeGithubComAntihaxGoesiEsi(in *jlexer.Lexer, out *PostFleetsFleetIdWingsWingIdSquadsNotFoundList) { isTopLevel := in.IsStart() if in.IsNull() { in.Skip() *out = nil } else { in.Delim('[') if *out == nil { if !in.IsDelim(']') { *out = make(PostFleetsFleetIdWingsWingIdSquadsNotFoundList, 0, 4) } else { *out = PostFleetsFleetIdWingsWingIdSquadsNotFoundList{} } } else { *out = (*out)[:0] } for !in.IsDelim(']') { var v1 PostFleetsFleetIdWingsWingIdSquadsNotFound (v1).UnmarshalEasyJSON(in) *out = append(*out, v1) in.WantComma() } in.Delim(']') } if isTopLevel { in.Consumed() } } func easyjson10365261EncodeGithubComAntihaxGoesiEsi(out *jwriter.Writer, in PostFleetsFleetIdWingsWingIdSquadsNotFoundList) {
if in == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { out.RawString("null") } else { out.RawByte('[') for v2, v3 := range in { if v2 > 0 { out.RawByte(',') } (v3).MarshalEasyJSON(out) } out.RawByte(']') } } // MarshalJSON supports json.Marshaler interface func (v PostFleetsFleetIdWingsWingIdSquadsNotFoundList) MarshalJSON() ([]byte, error) { w := jwriter.Writer{} easyjson10365261EncodeGithubComAntihaxGoesiEsi(&w, v) return w.Buffer.BuildBytes(), w.Error } // MarshalEasyJSON supports easyjson.Marshaler interface func (v PostFleetsFleetIdWingsWingIdSquadsNotFoundList) MarshalEasyJSON(w *jwriter.Writer) { easyjson10365261EncodeGithubComAntihaxGoesiEsi(w, v) } // UnmarshalJSON supports json.Unmarshaler interface func (v *PostFleetsFleetIdWingsWingIdSquadsNotFoundList) UnmarshalJSON(data []byte) error { r := jlexer.Lexer{Data: data} easyjson10365261DecodeGithubComAntihaxGoesiEsi(&r, v) return r.Error() } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *PostFleetsFleetIdWingsWingIdSquadsNotFoundList) UnmarshalEasyJSON(l *jlexer.Lexer) { easyjson10365261DecodeGithubComAntihaxGoesiEsi(l, v) } func easyjson10365261DecodeGithubComAntihaxGoesiEsi1(in *jlexer.Lexer, out *PostFleetsFleetIdWingsWingIdSquadsNotFound) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { in.Consumed() } in.Skip() return } in.Delim('{') for !in.IsDelim('}') { key := in.UnsafeString() in.WantColon() if in.IsNull() { in.Skip() in.WantComma() continue } switch key { case "error": out.Error_ = string(in.String()) default: in.SkipRecursive() } in.WantComma() } in.Delim('}') if isTopLevel { in.Consumed() } } func easyjson10365261EncodeGithubComAntihaxGoesiEsi1(out *jwriter.Writer, in PostFleetsFleetIdWingsWingIdSquadsNotFound) { out.RawByte('{') first := true _ = first if in.Error_ != "" { const prefix string = ",\"error\":" if first { first = false out.RawString(prefix[1:]) } else { out.RawString(prefix) } out.String(string(in.Error_)) } out.RawByte('}') } // MarshalJSON supports json.Marshaler interface func (v PostFleetsFleetIdWingsWingIdSquadsNotFound) MarshalJSON() ([]byte, error) { w := jwriter.Writer{} easyjson10365261EncodeGithubComAntihaxGoesiEsi1(&w, v) return w.Buffer.BuildBytes(), w.Error } // MarshalEasyJSON supports easyjson.Marshaler interface func (v PostFleetsFleetIdWingsWingIdSquadsNotFound) MarshalEasyJSON(w *jwriter.Writer) { easyjson10365261EncodeGithubComAntihaxGoesiEsi1(w, v) } // UnmarshalJSON supports json.Unmarshaler interface func (v *PostFleetsFleetIdWingsWingIdSquadsNotFound) UnmarshalJSON(data []byte) error { r := jlexer.Lexer{Data: data} easyjson10365261DecodeGithubComAntihaxGoesiEsi1(&r, v) return r.Error() } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *PostFleetsFleetIdWingsWingIdSquadsNotFound) UnmarshalEasyJSON(l *jlexer.Lexer) { easyjson10365261DecodeGithubComAntihaxGoesiEsi1(l, v) }
master_test.go
package machine import ( "testing" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/pointer" "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/asset/tls" "github.com/openshift/installer/pkg/ipnet" "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/types/aws" ) // TestMasterGenerate tests generating the master asset. func
(t *testing.T) { installConfig := &installconfig.InstallConfig{ Config: &types.InstallConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster", }, BaseDomain: "test-domain", Networking: &types.Networking{ ServiceCIDR: ipnet.MustParseCIDR("10.0.1.0/24"), }, Platform: types.Platform{ AWS: &aws.Platform{ Region: "us-east", }, }, ControlPlane: &types.MachinePool{ Name: "master", Replicas: pointer.Int64Ptr(3), }, }, } rootCA := &tls.RootCA{} err := rootCA.Generate(nil) assert.NoError(t, err, "unexpected error generating root CA") parents := asset.Parents{} parents.Add(installConfig, rootCA) master := &Master{} err = master.Generate(parents) assert.NoError(t, err, "unexpected error generating master asset") expectedIgnitionConfigNames := []string{ "master.ign", } actualFiles := master.Files() actualIgnitionConfigNames := make([]string, len(actualFiles)) for i, f := range actualFiles { actualIgnitionConfigNames[i] = f.Filename } assert.Equal(t, expectedIgnitionConfigNames, actualIgnitionConfigNames, "unexpected names for master ignition configs") }
TestMasterGenerate
lsf.py
# A simple CLI runner for LSF that can be used when running Galaxy from a # non-submit host and using an LSF cluster. from logging import getLogger try: from galaxy.model import Job job_states = Job.states except ImportError: # Not in Galaxy, map Galaxy job states to Pulsar ones. from pulsar.util import enum job_states = enum(RUNNING='running', OK='complete', QUEUED='queued', ERROR="failed") from ..job import BaseJobExec log = getLogger(__name__) argmap = { 'memory': '-M', # There is code in job_script_kwargs relying on this name's setting 'cores': '-n', 'queue': '-q', 'working_dir': '-cwd', 'project': '-P' } class LSF(BaseJobExec): def
(self, **params): self.params = {} for k, v in params.items(): self.params[k] = v def job_script_kwargs(self, ofile, efile, job_name): scriptargs = {'-o': ofile, '-e': efile, '-J': job_name} # Map arguments using argmap. for k, v in self.params.items(): if k == 'plugin': continue try: if k == 'memory': # Memory requires both -M and -R rusage[mem=v] requests scriptargs['-R'] = "\"rusage[mem=%s]\"" % v if not k.startswith('-'): k = argmap[k] scriptargs[k] = v except Exception: log.warning('Unrecognized long argument passed to LSF CLI plugin: %s' % k) # Generated template. template_scriptargs = '' for k, v in scriptargs.items(): template_scriptargs += '#BSUB %s %s\n' % (k, v) return dict(headers=template_scriptargs) def submit(self, script_file): # bsub returns Job <9147983> is submitted to default queue <research-rh7>. # This should be really handled outside with something like # parse_external. Currently the CLI runner expects the job id to be sent in the last position # of the string. return "bsub <%s | awk '{ print $2}' | sed 's/[<>]//g'" % script_file def delete(self, job_id): return 'bkill %s' % job_id def get_status(self, job_ids=None): return "bjobs -a -o \"id stat\" -noheader" # check this def get_single_status(self, job_id): return "bjobs -o stat -noheader " + job_id def parse_status(self, status, job_ids): # Get status for each job, skipping header. rval = {} for line in status.splitlines(): job_id, state = line.split() if job_id in job_ids: # map job states to Galaxy job states. rval[job_id] = self._get_job_state(state) return rval def parse_single_status(self, status, job_id): if not status: # Job not found in LSF, most probably finished and forgotten. # lsf outputs: Job <num> is not found -- but that is on the stderr # Note: a very old failed job will not be shown here either, # which would be badly handled here. So this only works well when Galaxy # is constantly monitoring the jobs. The logic here is that DONE jobs get forgotten # faster than failed jobs. log.warning("Job id '%s' not found in LSF status check" % job_id) return job_states.OK return self._get_job_state(status) def get_failure_reason(self, job_id): return "bjobs -l " + job_id def parse_failure_reason(self, reason, job_id): # LSF will produce the following in the job output file: # TERM_MEMLIMIT: job killed after reaching LSF memory usage limit. # Exited with exit code 143. for line in reason.splitlines(): if "TERM_MEMLIMIT" in line: from galaxy.jobs import JobState return JobState.runner_states.MEMORY_LIMIT_REACHED return None def _get_job_state(self, state): # based on: # https://www.ibm.com/support/knowledgecenter/en/SSETD4_9.1.3/lsf_admin/job_state_lsf.html # https://www.ibm.com/support/knowledgecenter/en/SSETD4_9.1.2/lsf_command_ref/bjobs.1.html try: return { 'EXIT': job_states.ERROR, 'RUN': job_states.RUNNING, 'PEND': job_states.QUEUED, 'DONE': job_states.OK, 'PSUSP': job_states.ERROR, 'USUSP': job_states.ERROR, 'SSUSP': job_states.ERROR, 'UNKWN': job_states.ERROR, 'WAIT': job_states.QUEUED, 'ZOMBI': job_states.ERROR }[state] except KeyError: raise KeyError("Failed to map LSF status code [%s] to job state." % state) __all__ = ('LSF',)
__init__
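# --- Usage sketch (illustrative, not part of the plugin) ---------------------
# A minimal sketch of how a CLI runner would drive the plugin above: render the
# #BSUB header block and parse canned bjobs output. The import path, parameter
# values, and the bjobs output below are all hypothetical; adjust the import to
# wherever this module lives in your tree.
from lsf import LSF  # hypothetical import path

plugin = LSF(memory='4000', cores='4', queue='short')

# Render the #BSUB headers prepended to the generated job script.
print(plugin.job_script_kwargs('/tmp/job.o', '/tmp/job.e', 'galaxy_123')['headers'])
# #BSUB -o /tmp/job.o
# #BSUB -e /tmp/job.e
# #BSUB -J galaxy_123
# #BSUB -R "rusage[mem=4000]"
# #BSUB -M 4000
# #BSUB -n 4
# #BSUB -q short

# The runner shells out the strings returned by submit()/get_status(); here we
# feed parse_status() canned output shaped like: bjobs -a -o "id stat" -noheader
states = plugin.parse_status("1001 RUN\n1002 DONE\n1003 PEND\n", job_ids=['1001', '1002'])
print(states)  # {'1001': <RUNNING>, '1002': <OK>} per the active job_states enum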
bitcoin_zh_CN.ts
<TS language="zh_CN" version="2.1"> <context> <name>AddressBookPage</name> <message> <source>Right-click to edit address or label</source> <translation>鼠标右击编辑地址或标签</translation> </message> <message> <source>Create a new address</source> <translation>创建新地址</translation> </message> <message> <source>&amp;New</source> <translation>新建(&amp;N)</translation> </message> <message> <source>Copy the currently selected address to the system clipboard</source> <translation>复制当前选中的地址到系统剪贴板</translation> </message> <message> <source>&amp;Copy</source> <translation>复制(&amp;C)</translation> </message> <message> <source>C&amp;lose</source> <translation>关闭(&amp;l)</translation> </message> <message> <source>Delete the currently selected address from the list</source> <translation>从列表中删除选中的地址</translation> </message> <message> <source>Enter address or label to search</source> <translation>输入地址或标签来搜索</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation>将当前标签页数据导出到文件</translation> </message> <message> <source>&amp;Export</source> <translation>导出(&amp;E)</translation> </message> <message> <source>&amp;Delete</source> <translation>删除(&amp;D)</translation> </message> <message> <source>Choose the address to send coins to</source> <translation>选择要付钱过去的地址</translation> </message> <message> <source>Choose the address to receive coins with</source> <translation>选择要收钱进来的地址</translation> </message> <message> <source>C&amp;hoose</source> <translation>选择</translation> </message> <message> <source>Sending addresses</source> <translation>付款地址</translation> </message> <message> <source>Receiving addresses</source> <translation>收款地址</translation> </message> <message> <source>These are your Neuralinkbrainkeycoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>这些是你要付款过去的莱特币地址。在付钱之前,务必要检查金额和收款地址是否正确。</translation> </message> <message> <source>These are your Neuralinkbrainkeycoin addresses for receiving payments. It is recommended to use a new receiving address for each transaction.</source> <translation>这些是你用来收款的莱特币地址。建议在每次交易时,都使用一个新的收款地址。</translation> </message> <message> <source>&amp;Copy Address</source> <translation>复制地址</translation> </message> <message> <source>Copy &amp;Label</source> <translation>复制标签</translation> </message> <message> <source>&amp;Edit</source> <translation>编辑</translation> </message> <message> <source>Export Address List</source> <translation>导出地址列表</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>逗号分隔文件 (*.csv)</translation> </message> <message> <source>Exporting Failed</source> <translation>导出失败</translation> </message> <message> <source>There was an error trying to save the address list to %1. 
Please try again.</source> <translation>存储地址列表到 %1 时发生错误。请再试一次。</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <source>Label</source> <translation>标签</translation> </message> <message> <source>Address</source> <translation>地址</translation> </message> <message> <source>(no label)</source> <translation>(无标签)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <source>Passphrase Dialog</source> <translation>密码对话框</translation> </message> <message> <source>Enter passphrase</source> <translation>输入密码</translation> </message> <message> <source>New passphrase</source> <translation>新密码</translation> </message> <message> <source>Repeat new passphrase</source> <translation>重复新密码</translation> </message> <message> <source>Show password</source> <translation>显示密码</translation> </message> <message> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;ten or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>输入钱包的新密码。&lt;br/&gt;密码请用&lt;b&gt;10 个以上的随机字符&lt;/b&gt;,或是&lt;b&gt;8 个以上的字词&lt;/b&gt;。</translation> </message> <message> <source>Encrypt wallet</source> <translation>加密钱包</translation> </message> <message> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>这个操作需要你的钱包密码来解锁钱包。</translation> </message> <message> <source>Unlock wallet</source> <translation>解锁钱包</translation> </message> <message> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>这个操作需要你的钱包密码来把钱包解密。</translation> </message> <message> <source>Decrypt wallet</source> <translation>解密钱包</translation> </message> <message> <source>Change passphrase</source> <translation>修改密码</translation> </message> <message> <source>Enter the old passphrase and new passphrase to the wallet.</source> <translation>请输入钱包的旧密码和新密码。</translation> </message> <message> <source>Confirm wallet encryption</source> <translation>确认钱包加密</translation> </message> <message> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR LITECOINS&lt;/b&gt;!</source> <translation>警告: 如果把钱包加密后又忘记密码,你就会从此&lt;b&gt;失去其中所有的莱特币了&lt;/b&gt;!</translation> </message> <message> <source>Are you sure you wish to encrypt your wallet?</source> <translation>你确定要把钱包加密吗?</translation> </message> <message> <source>Wallet encrypted</source> <translation>钱包已加密</translation> </message> <message> <source>%1 will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your neuralinkbrainkeycoins from being stolen by malware infecting your computer.</source> <translation>%1 现在要关闭,以完成加密过程。请注意,加密钱包不能完全防止入侵你的电脑的恶意程序偷取钱币。</translation> </message> <message> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>重要: 请改用新产生的有加密的钱包文件,来取代旧钱包文件的备份。为了安全性,当你开始使用新的有加密的钱包后,旧钱包文件的备份就不能再使用了。</translation> </message> <message> <source>Wallet encryption failed</source> <translation>钱包加密失败</translation> </message> <message> <source>Wallet encryption failed due to an internal error. 
Your wallet was not encrypted.</source> <translation>因为内部错误导致钱包加密失败。你的钱包还是没加密。</translation> </message> <message> <source>The supplied passphrases do not match.</source> <translation>提供的密码不一致。</translation> </message> <message> <source>Wallet unlock failed</source> <translation>钱包解锁失败</translation> </message> <message> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>输入用来解密钱包的密码不正确。</translation> </message> <message> <source>Wallet decryption failed</source> <translation>钱包解密失败</translation> </message> <message> <source>Wallet passphrase was successfully changed.</source> <translation>钱包密码修改成功。</translation> </message> <message> <source>Warning: The Caps Lock key is on!</source> <translation>警告: 大写字母锁定已开启!</translation> </message> </context> <context> <name>BanTableModel</name> <message> <source>IP/Netmask</source> <translation>IP/网络掩码</translation> </message> <message> <source>Banned Until</source> <translation>在此之前禁止:</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <source>Sign &amp;message...</source> <translation>消息签名(&amp;M)...</translation> </message> <message> <source>Synchronizing with network...</source> <translation>正在与网络同步...</translation> </message> <message> <source>&amp;Overview</source> <translation>概况(&amp;O)</translation> </message> <message> <source>Node</source> <translation>节点</translation> </message> <message> <source>Show general overview of wallet</source> <translation>显示钱包概况</translation> </message> <message> <source>&amp;Transactions</source> <translation>交易记录(&amp;T)</translation> </message> <message> <source>Browse transaction history</source> <translation>查看交易历史</translation> </message> <message> <source>E&amp;xit</source> <translation>退出(&amp;X)</translation> </message> <message> <source>Quit application</source> <translation>退出程序</translation> </message> <message> <source>&amp;About %1</source> <translation>关于 %1</translation> </message> <message> <source>Show information about %1</source> <translation>显示 %1 相关信息</translation> </message> <message> <source>About &amp;Qt</source> <translation>关于Qt(&amp;Q)</translation> </message> <message> <source>Show information about Qt</source> <translation>显示 Qt 相关信息</translation> </message> <message> <source>&amp;Options...</source> <translation>选项(&amp;O)...</translation> </message> <message> <source>Modify configuration options for %1</source> <translation>修改%1配置选项</translation> </message> <message> <source>&amp;Encrypt Wallet...</source> <translation>加密钱包(&amp;E)...</translation> </message> <message> <source>&amp;Backup Wallet...</source> <translation>备份钱包(&amp;B)...</translation> </message> <message> <source>&amp;Change Passphrase...</source> <translation>更改密码(&amp;C)...</translation> </message> <message> <source>&amp;Sending addresses...</source> <translation>正在发送地址(&amp;S)...</translation> </message> <message> <source>&amp;Receiving addresses...</source> <translation>正在接收地址(&amp;R)...</translation> </message> <message> <source>Open &amp;URI...</source> <translation>打开 &amp;URI...</translation> </message> <message> <source>Click to disable network activity.</source> <translation>点击禁用网络活动。</translation> </message> <message> <source>Network activity disabled.</source> <translation>网络活动已禁用。</translation> </message> <message> <source>Click to enable network activity again.</source> <translation>点击重新开启网络活动。</translation> </message> <message> <source>Syncing Headers (%1%)...</source> <translation>同步区块头 (%1%)...</translation> </message> <message> 
<source>Reindexing blocks on disk...</source> <translation>正在为数据块重建索引...</translation> </message> <message> <source>Send coins to a Neuralinkbrainkeycoin address</source> <translation>向一个莱特币地址发送莱特币</translation> </message> <message> <source>Backup wallet to another location</source> <translation>备份钱包到其他文件夹</translation> </message> <message> <source>Change the passphrase used for wallet encryption</source> <translation>更改钱包加密口令</translation> </message> <message> <source>&amp;Debug window</source> <translation>调试窗口(&amp;D)</translation> </message> <message> <source>Open debugging and diagnostic console</source> <translation>打开调试和诊断控制台</translation> </message> <message> <source>&amp;Verify message...</source> <translation>验证消息(&amp;V)...</translation> </message> <message> <source>Neuralinkbrainkeycoin</source> <translation>莱特币</translation> </message> <message> <source>Wallet</source> <translation>钱包</translation> </message> <message> <source>&amp;Send</source> <translation>发送(&amp;S)</translation> </message> <message> <source>&amp;Receive</source> <translation>接收(&amp;R)</translation> </message> <message> <source>&amp;Show / Hide</source> <translation>显示 / 隐藏(&amp;S)</translation> </message> <message> <source>Show or hide the main Window</source> <translation>显示或隐藏主窗口</translation> </message> <message> <source>Encrypt the private keys that belong to your wallet</source> <translation>对钱包中的私钥加密</translation> </message> <message> <source>Sign messages with your Neuralinkbrainkeycoin addresses to prove you own them</source> <translation>用莱特币地址关联的私钥为消息签名,以证明您拥有这个莱特币地址</translation> </message> <message> <source>Verify messages to ensure they were signed with specified Neuralinkbrainkeycoin addresses</source> <translation>校验消息,确保该消息是由指定的莱特币地址所有者签名的</translation> </message> <message> <source>&amp;File</source> <translation>文件(&amp;F)</translation> </message> <message> <source>&amp;Settings</source> <translation>设置(&amp;S)</translation> </message> <message> <source>&amp;Help</source> <translation>帮助(&amp;H)</translation> </message> <message> <source>Tabs toolbar</source> <translation>分页工具栏</translation> </message> <message> <source>Request payments (generates QR codes and neuralinkbrainkeycoin: URIs)</source> <translation>请求支付 (生成二维码和 neuralinkbrainkeycoin: URI)</translation> </message> <message> <source>Show the list of used sending addresses and labels</source> <translation>显示用过的发送地址和标签的列表</translation> </message> <message> <source>Show the list of used receiving addresses and labels</source> <translation>显示用过的接收地址和标签的列表</translation> </message> <message> <source>Open a neuralinkbrainkeycoin: URI or payment request</source> <translation>打开一个 neuralinkbrainkeycoin: URI 或支付请求</translation> </message> <message> <source>&amp;Command-line options</source> <translation>命令行选项(&amp;C)</translation> </message> <message> <source>Indexing blocks on disk...</source> <translation>正在为数据块建立索引...</translation> </message> <message> <source>Processing blocks on disk...</source> <translation>正在处理数据块...</translation> </message> <message> <source>%1 behind</source> <translation>落后 %1 </translation> </message> <message> <source>Last received block was generated %1 ago.</source> <translation>最新收到的区块产生于 %1。</translation> </message> <message> <source>Transactions after this will not yet be visible.</source> <translation>在此之后的交易尚未可见</translation> </message> <message> <source>Error</source> <translation>错误</translation> </message> <message> <source>Warning</source> <translation>警告</translation> </message> <message> 
<source>Information</source> <translation>信息</translation> </message> <message> <source>Up to date</source> <translation>已是最新</translation> </message> <message> <source>Show the %1 help message to get a list with possible Neuralinkbrainkeycoin command-line options</source> <translation>显示 %1 帮助信息,获取可用命令行选项列表</translation> </message> <message> <source>%1 client</source> <translation>%1 客戶</translation> </message> <message> <source>Connecting to peers...</source> <translation>正在连接到节点……</translation> </message> <message> <source>Catching up...</source> <translation>更新中...</translation> </message> <message> <source>Date: %1 </source> <translation>日期: %1 </translation> </message> <message> <source>Amount: %1 </source> <translation>金额: %1 </translation> </message> <message> <source>Type: %1 </source> <translation>类型: %1 </translation> </message> <message> <source>Label: %1 </source> <translation>标签: %1 </translation> </message> <message> <source>Address: %1 </source> <translation>地址: %1 </translation> </message> <message> <source>Sent transaction</source> <translation>发送交易</translation> </message> <message> <source>Incoming transaction</source> <translation>流入交易</translation> </message> <message> <source>HD key generation is &lt;b&gt;enabled&lt;/b&gt;</source> <translation>HD密钥生成&lt;b&gt;启用&lt;/b&gt;</translation> </message> <message> <source>HD key generation is &lt;b&gt;disabled&lt;/b&gt;</source> <translation>HD密钥生成&lt;b&gt;禁用&lt;/b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>钱包已被&lt;b&gt;加密&lt;/b&gt;,当前为&lt;b&gt;解锁&lt;/b&gt;状态</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>钱包已被&lt;b&gt;加密&lt;/b&gt;,当前为&lt;b&gt;锁定&lt;/b&gt;状态</translation> </message> <message> <source>A fatal error occurred. 
Neuralinkbrainkeycoin can no longer continue safely and will quit.</source> <translation>发生严重错误。客户端无法安全地继续运行,即将退出。</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <source>Coin Selection</source> <translation>选择钱币</translation> </message> <message> <source>Quantity:</source> <translation>总量:</translation> </message> <message> <source>Bytes:</source> <translation>字节:</translation> </message> <message> <source>Amount:</source> <translation>金额:</translation> </message> <message> <source>Fee:</source> <translation>费用:</translation> </message> <message> <source>Dust:</source> <translation>小额:</translation> </message> <message> <source>After Fee:</source> <translation>加上交易费用后:</translation> </message> <message> <source>Change:</source> <translation>变更 : </translation> </message> <message> <source>(un)select all</source> <translation>(不)全选</translation> </message> <message> <source>Tree mode</source> <translation>树状模式</translation> </message> <message> <source>List mode</source> <translation>列表模式</translation> </message> <message> <source>Amount</source> <translation>金额</translation> </message> <message> <source>Received with label</source> <translation>按标签收款</translation> </message> <message> <source>Received with address</source> <translation>按地址收款</translation> </message> <message> <source>Date</source> <translation>日期</translation> </message> <message> <source>Confirmations</source> <translation>确认</translation> </message> <message> <source>Confirmed</source> <translation>已确认</translation> </message> <message> <source>Copy address</source> <translation>复制地址</translation> </message> <message> <source>Copy label</source> <translation>复制标签</translation> </message> <message> <source>Copy amount</source> <translation>复制金额</translation> </message> <message> <source>Copy transaction ID</source> <translation>复制交易识别码</translation> </message> <message> <source>Lock unspent</source> <translation>锁定未花费</translation> </message> <message> <source>Unlock unspent</source> <translation>解锁未花费</translation> </message> <message> <source>Copy quantity</source> <translation>复制数目</translation> </message> <message> <source>Copy fee</source> <translation>复制手续费</translation> </message> <message> <source>Copy after fee</source> <translation>复制计费后金额</translation> </message> <message> <source>Copy bytes</source> <translation>复制字节数</translation> </message> <message> <source>Copy dust</source> <translation>复制零散金额</translation> </message> <message> <source>Copy change</source> <translation>复制找零金额</translation> </message> <message> <source>(%1 locked)</source> <translation>(锁定 %1 枚)</translation> </message> <message> <source>yes</source> <translation>是</translation> </message> <message> <source>no</source> <translation>否</translation> </message> <message> <source>This label turns red if any recipient receives an amount smaller than the current dust threshold.</source> <translation>当任何一个收款金额小于目前的零散金额上限时,文字会变红色。</translation> </message> <message> <source>Can vary +/- %1 satoshi(s) per input.</source> <translation>每组输入可能有 +/- %1 个 satoshi 的误差。</translation> </message> <message> <source>(no label)</source> <translation>(无标签)</translation> </message> <message> <source>change from %1 (%2)</source> <translation>找零前是 %1 (%2)</translation> </message> <message> <source>(change)</source> <translation>(找零)</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <source>Edit Address</source> <translation>编辑地址</translation> </message> <message> 
<source>&amp;Label</source> <translation>标签(&amp;L)</translation> </message> <message> <source>The label associated with this address list entry</source> <translation>与此地址相关的标签项</translation> </message> <message> <source>The address associated with this address list entry. This can only be modified for sending addresses.</source> <translation>该地址已与地址列表中的条目关联,只能被发送地址修改。</translation> </message> <message> <source>&amp;Address</source> <translation>地址(&amp;A)</translation> </message> <message> <source>New sending address</source> <translation>新建付款地址</translation> </message> <message> <source>Edit receiving address</source> <translation>编辑收款地址</translation> </message> <message> <source>Edit sending address</source>
<translation>编辑付款地址</translation> </message> <message> <source>The entered address "%1" is not a valid Neuralinkbrainkeycoin address.</source> <translation>输入的地址 %1 并不是有效的莱特币地址。</translation> </message> <message> <source>Could not unlock wallet.</source> <translation>无法将钱包解锁。</translation> </message> <message> <source>New key generation failed.</source> <translation>产生新的密钥失败了。</translation> </message> </context> <context> <name>FreespaceChecker</name> <message> <source>A new data directory will be created.</source> <translation>一个新的数据目录将被创建。</translation> </message> <message> <source>name</source> <translation>名称</translation> </message> <message> <source>Directory already exists. Add %1 if you intend to create a new directory here.</source> <translation>目录已存在。如果您打算在这里创建一个新目录,添加 %1。</translation> </message> <message> <source>Path already exists, and is not a directory.</source> <translation>路径已存在,并且不是一个目录。</translation> </message> <message> <source>Cannot create data directory here.</source> <translation>无法在此创建数据目录。</translation> </message> </context> <context> <name>HelpMessageDialog</name> <message> <source>version</source> <translation>版本</translation> </message> <message> <source>(%1-bit)</source> <translation>(%1 位)</translation> </message> <message> <source>About %1</source> <translation>關於 %1</translation> </message> <message> <source>Command-line options</source> <translation>命令行选项</translation> </message> </context> <context> <name>Intro</name> <message> <source>Welcome</source> <translation>欢迎</translation> </message> <message> <source>Welcome to %1.</source> <translation>欢迎使用 %1</translation> </message> <message> <source>As this is the first time the program is launched, you can choose where %1 will store its data.</source> <translation>由于这是第一次启动此程序,您可以选择%1的数据所存储的位置</translation> </message> <message> <source>When you click OK, %1 will begin to download and process the full %4 block chain (%2GB) starting with the earliest transactions in %3 when %4 initially launched.</source> <translation>当你点击确认后,%1 将会在 %4 启动时从 %3 中最早的交易开始,下载并处理完整的 %4 区块链 (%2GB)。</translation> </message> <message> <source>If you have chosen to limit block chain storage (pruning), the historical data must still be downloaded and processed, but will be deleted afterward to keep your disk usage low.</source> <translation>如果你选择限制区块链存储大小(区块链裁剪模式),程序依然会下载并处理全部历史数据,此后才会删除不必须的部分,占用最少的存储空间。</translation> </message> <message> <source>Use the default data directory</source> <translation>使用默认的数据目录</translation> </message> <message> <source>Use a custom data directory:</source> <translation>使用自定义的数据目录:</translation> </message> <message> <source>Neuralinkbrainkeycoin</source> <translation>莱特币</translation> </message> <message> <source>At least %1 GB of data will be stored in this directory, and it will grow over time.</source> <translation>此目录中至少会保存 %1 GB 的数据,并且尺寸还会随着时间增长。</translation> </message> <message> <source>Approximately %1 GB of data will be stored in this directory.</source> <translation>会在此目录中存储约 %1 GB 的数据。</translation> </message> <message> <source>%1 will download and store a copy of the Neuralinkbrainkeycoin block chain.</source> <translation>%1 将会下载并存储莱特币区块链。</translation> </message> <message> <source>The wallet will also be stored in this directory.</source> <translation>钱包也会被保存在这个目录中。</translation> </message> <message> <source>Error: Specified data directory "%1" cannot be created.</source> <translation>错误:无法创建 指定的数据目录 "%1" </translation> </message> <message> <source>Error</source> 
<translation>错误</translation> </message> </context> <context> <name>ModalOverlay</name> <message> <source>Form</source> <translation>表单</translation> </message> <message> <source>Recent transactions may not yet be visible, and therefore your wallet's balance might be incorrect. This information will be correct once your wallet has finished synchronizing with the neuralinkbrainkeycoin network, as detailed below.</source> <translation>近期交易可能尚未显示,因此当前余额可能不准确。以上信息将在与莱特币网络完全同步后更正。详情如下</translation> </message> <message> <source>Attempting to spend neuralinkbrainkeycoins that are affected by not-yet-displayed transactions will not be accepted by the network.</source> <translation>尝试使用受未可见交易影响的余额将不被网络接受。</translation> </message> <message> <source>Number of blocks left</source> <translation>剩余区块数量</translation> </message> <message> <source>Unknown...</source> <translation>未知</translation> </message> <message> <source>Last block time</source> <translation>上一数据块时间</translation> </message> <message> <source>Progress</source> <translation>进度</translation> </message> <message> <source>Progress increase per hour</source> <translation>每小时进度增加</translation> </message> <message> <source>calculating...</source> <translation>正在计算</translation> </message> <message> <source>Estimated time left until synced</source> <translation>预计剩余同步时间</translation> </message> <message> <source>Hide</source> <translation>隐藏</translation> </message> <message> <source>Unknown. Syncing Headers (%1)...</source> <translation>未知状态。同步区块头(%1)...</translation> </message> </context> <context> <name>OpenURIDialog</name> <message> <source>Open URI</source> <translation>打开 URI</translation> </message> <message> <source>Open payment request from URI or file</source> <translation>打开来自URI或文件的付款请求 </translation> </message> <message> <source>URI:</source> <translation>URI: </translation> </message> <message> <source>Select payment request file</source> <translation>选择付款请求文件 </translation> </message> <message> <source>Select payment request file to open</source> <translation>选择要打开的付款请求文件</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <source>Options</source> <translation>选项</translation> </message> <message> <source>&amp;Main</source> <translation>主要(&amp;M)</translation> </message> <message> <source>Automatically start %1 after logging in to the system.</source> <translation>在登入系统后自动启动 %1</translation> </message> <message> <source>&amp;Start %1 on system login</source> <translation>系统登入时启动 %1</translation> </message> <message> <source>Size of &amp;database cache</source> <translation>数据库缓存大小(&amp;D)</translation> </message> <message> <source>MB</source> <translation>MB</translation> </message> <message> <source>Number of script &amp;verification threads</source> <translation>脚本验证线程数(&amp;V)</translation> </message> <message> <source>IP address of the proxy (e.g. 
IPv4: 127.0.0.1 / IPv6: ::1)</source> <translation>代理的 IP 地址 (例如 IPv4: 127.0.0.1 / IPv6: ::1)</translation> </message> <message> <source>Shows if the supplied default SOCKS5 proxy is used to reach peers via this network type.</source> <translation>显示默认的SOCKS5代理是否被用于在该类型的网络下连接同伴</translation> </message> <message> <source>Use separate SOCKS&amp;5 proxy to reach peers via Tor hidden services:</source> <translation>通过Tor隐藏服务连接节点时使用不同的SOCKS&amp;5代理:</translation> </message> <message> <source>Hide the icon from the system tray.</source> <translation>隐藏系统通知区图标</translation> </message> <message> <source>&amp;Hide tray icon</source> <translation>隐藏通知区图标(&amp;H)</translation> </message> <message> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Exit in the menu.</source> <translation>窗口被关闭时最小化而不是退出应用程序。当此选项启用时,应用程序只会在菜单中选择退出时退出。</translation> </message> <message> <source>Third party URLs (e.g. a block explorer) that appear in the transactions tab as context menu items. %s in the URL is replaced by transaction hash. Multiple URLs are separated by vertical bar |.</source> <translation>出现在交易的选项卡的上下文菜单项的第三方网址 (例如:区块链接查询) 。 %s的URL被替换为交易哈希。多个的URL需要竖线 | 分隔。</translation> </message> <message> <source>Active command-line options that override above options:</source> <translation>有效的命令行参数覆盖上述选项:</translation> </message> <message> <source>Open the %1 configuration file from the working directory.</source> <translation>从工作目录下打开配置文件 %1。</translation> </message> <message> <source>Open Configuration File</source> <translation>打开配置文件</translation> </message> <message> <source>Reset all client options to default.</source> <translation>恢复客户端的缺省设置</translation> </message> <message> <source>&amp;Reset Options</source> <translation>恢复缺省设置(&amp;R)</translation> </message> <message> <source>&amp;Network</source> <translation>网络(&amp;N)</translation> </message> <message> <source>(0 = auto, &lt;0 = leave that many cores free)</source> <translation>(0 = 自动, &lt;0 = 保持指定数量的CPU核心空闲)</translation> </message> <message> <source>W&amp;allet</source> <translation>钱包(&amp;A)</translation> </message> <message> <source>Expert</source> <translation>专家</translation> </message> <message> <source>Enable coin &amp;control features</source> <translation>启动货币控制功能(&amp;C)</translation> </message> <message> <source>If you disable the spending of unconfirmed change, the change from a transaction cannot be used until that transaction has at least one confirmation. This also affects how your balance is computed.</source> <translation>如果禁用未确认的零钱,则零钱至少需要1个确认才能使用。同时账户余额计算会受到影响。</translation> </message> <message> <source>&amp;Spend unconfirmed change</source> <translation>使用未经确认的零钱(&amp;S)</translation> </message> <message> <source>Automatically open the Neuralinkbrainkeycoin client port on the router. 
This only works when your router supports UPnP and it is enabled.</source> <translation>自动在路由器中打开莱特币端口。只有当您的路由器开启了 UPnP 选项时此功能才有效。</translation> </message> <message> <source>Map port using &amp;UPnP</source> <translation>使用 &amp;UPnP 映射端口</translation> </message> <message> <source>Accept connections from outside.</source> <translation>接收外部连接。</translation> </message> <message> <source>Allow incomin&amp;g connections</source> <translation>允许流入连接(&amp;G)</translation> </message> <message> <source>Connect to the Neuralinkbrainkeycoin network through a SOCKS5 proxy.</source> <translation>通过 SOCKS5 代理连接莱特币网络。</translation> </message> <message> <source>&amp;Connect through SOCKS5 proxy (default proxy):</source> <translation>通过 SO&amp;CKS5 代理连接(默认代理):</translation> </message> <message> <source>Proxy &amp;IP:</source> <translation>代理服务器 &amp;IP:</translation> </message> <message> <source>&amp;Port:</source> <translation>端口(&amp;P):</translation> </message> <message> <source>Port of the proxy (e.g. 9050)</source> <translation>代理端口(例如 9050)</translation> </message> <message> <source>Used for reaching peers via:</source> <translation>连接到同伴的方式:</translation> </message> <message> <source>IPv4</source> <translation>IPv4</translation> </message> <message> <source>IPv6</source> <translation>IPv6</translation> </message> <message> <source>Tor</source> <translation>Tor</translation> </message> <message> <source>Connect to the Neuralinkbrainkeycoin network through a separate SOCKS5 proxy for Tor hidden services.</source> <translation>在 Tor 匿名网络下通过不同的 SOCKS5 代理连接莱特币网络</translation> </message> <message> <source>&amp;Window</source> <translation>窗口(&amp;W)</translation> </message> <message> <source>Show only a tray icon after minimizing the window.</source> <translation>最小化窗口后仅显示托盘图标</translation> </message> <message> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>最小化到托盘(&amp;M)</translation> </message> <message> <source>M&amp;inimize on close</source> <translation>单击关闭按钮最小化(&amp;I)</translation> </message> <message> <source>&amp;Display</source> <translation>显示(&amp;D)</translation> </message> <message> <source>User Interface &amp;language:</source> <translation>用户界面语言(&amp;L):</translation> </message> <message> <source>The user interface language can be set here. This setting will take effect after restarting %1.</source> <translation>可以在这里设定用户界面的语言。这个设定在重启 %1 后才会生效。</translation> </message> <message> <source>&amp;Unit to show amounts in:</source> <translation>莱特币金额单位(&amp;U):</translation> </message> <message> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>选择莱特币单位。</translation> </message> <message> <source>Whether to show coin control features or not.</source> <translation>是否需要交易源地址控制功能。</translation> </message> <message> <source>&amp;Third party transaction URLs</source> <translation>第三方交易网址(&amp;T)</translation> </message> <message> <source>&amp;OK</source> <translation>确定(&amp;O)</translation> </message> <message> <source>&amp;Cancel</source> <translation>取消(&amp;C)</translation> </message> <message> <source>default</source> <translation>默认</translation> </message> <message> <source>none</source> <translation>无</translation> </message> <message> <source>Confirm options reset</source> <translation>确认恢复缺省设置</translation> </message> <message> <source>Client restart required to activate changes.</source> <translation>更改生效需要重启客户端。</translation> </message> <message> <source>Client will be shut down. 
Do you want to proceed?</source> <translation>客户端即将关闭,您想继续吗?</translation> </message> <message> <source>Configuration options</source> <translation>配置选项</translation> </message> <message> <source>The configuration file is used to specify advanced user options which override GUI settings. Additionally, any command-line options will override this configuration file.</source> <translation>配置文件可以用来设置高级选项。配置文件会覆盖设置界面窗口中的选项。此外,命令行会覆盖配置文件指定的选项。</translation> </message> <message> <source>Error</source> <translation>错误</translation> </message> <message> <source>The configuration file could not be opened.</source> <translation>配置文件无法打开。</translation> </message> <message> <source>This change would require a client restart.</source> <translation>此更改需要重启客户端。</translation> </message> <message> <source>The supplied proxy address is invalid.</source> <translation>提供的代理服务器地址无效。</translation> </message> </context> <context> <name>OverviewPage</name> <message> <source>Form</source> <translation>表单</translation> </message> <message> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the Neuralinkbrainkeycoin network after a connection is established, but this process has not completed yet.</source> <translation>现在显示的消息可能是过期的。在连接上莱特币网络节点后,您的钱包将自动与网络同步,但是这个过程还没有完成。</translation> </message> <message> <source>Watch-only:</source> <translation>仅观察:</translation> </message> <message> <source>Available:</source> <translation>可使用的余额:</translation> </message> <message> <source>Your current spendable balance</source> <translation>您当前可使用的余额</translation> </message> <message> <source>Pending:</source> <translation>等待中的余额:</translation> </message> <message> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source> <translation>尚未确认的交易总额,未计入当前余额</translation> </message> <message> <source>Immature:</source> <translation>未成熟的:</translation> </message> <message> <source>Mined balance that has not yet matured</source> <translation>尚未成熟的挖矿收入余额</translation> </message> <message> <source>Balances</source> <translation>余额</translation> </message> <message> <source>Total:</source> <translation>总额:</translation> </message> <message> <source>Your current total balance</source> <translation>您当前的总余额</translation> </message> <message> <source>Your current balance in watch-only addresses</source> <translation>您当前 观察地址(watch-only address)的余额 </translation> </message> <message> <source>Spendable:</source> <translation>可使用:</translation> </message> <message> <source>Recent transactions</source> <translation>最近交易记录</translation> </message> <message> <source>Unconfirmed transactions to watch-only addresses</source> <translation>观察地址(watch-only address)的未确认交易记录 </translation> </message> <message> <source>Mined balance in watch-only addresses that has not yet matured</source> <translation>观察地址(watch-only address)中尚未成熟(matured)的挖矿收入余额:</translation> </message> <message> <source>Current total balance in watch-only addresses</source> <translation>观察地址(watch-only address)中的当前总余额 </translation> </message> </context> <context> <name>PaymentServer</name> <message> <source>Payment request error</source> <translation>要求付款时发生错误</translation> </message> <message> <source>Cannot start neuralinkbrainkeycoin: click-to-pay handler</source> <translation>无法启动 neuralinkbrainkeycoin 协议的“ 一键支付”处理器</translation> </message> <message> <source>URI handling</source> <translation>URI 处理</translation> </message> <message> <source>Payment request fetch URL is 
invalid: %1</source> <translation>取得付款请求的 URL 无效: %1</translation> </message> <message> <source>Invalid payment address %1</source> <translation>无效的付款地址 %1</translation> </message> <message> <source>URI cannot be parsed! This can be caused by an invalid Neuralinkbrainkeycoin address or malformed URI parameters.</source> <translation>无法解析 URI 地址!可能是因为莱特币地址无效,或是 URI 参数格式错误。</translation> </message> <message> <source>Payment request file handling</source> <translation>处理付款请求文件</translation> </message> <message> <source>Payment request file cannot be read! This can be caused by an invalid payment request file.</source> <translation>无法读取付款请求文件!可能是文件无效造成的。</translation> </message> <message> <source>Payment request rejected</source> <translation>付款请求已被拒绝</translation> </message> <message> <source>Payment request network doesn't match client network.</source> <translation>付款请求的网络类型跟客户端不符。</translation> </message> <message> <source>Payment request expired.</source> <translation>付款请求已过期。</translation> </message> <message> <source>Unverified payment requests to custom payment scripts are unsupported.</source> <translation>不支持到自定义付款脚本的未验证付款请求。</translation> </message> <message> <source>Invalid payment request.</source> <translation>无效的支付请求。</translation> </message> <message> <source>Requested payment amount of %1 is too small (considered dust).</source> <translation>请求支付的金额 %1 太小 (可被忽略)。</translation> </message> <message> <source>Refund from %1</source> <translation>来自 %1 的退款</translation> </message> <message> <source>Payment request %1 is too large (%2 bytes, allowed %3 bytes).</source> <translation>支付请求 %1 太大 (%2 字节。只允许 %3 字节)。</translation> </message> <message> <source>Error communicating with %1: %2</source> <translation>与 %1 通信出错: %2</translation> </message> <message> <source>Payment request cannot be parsed!</source> <translation>无法解析付款请求!</translation> </message> <message> <source>Bad response from server %1</source> <translation>来自服务器 %1 的响应无效</translation> </message> <message> <source>Network request error</source> <translation>网络请求出错</translation> </message> <message> <source>Payment acknowledged</source> <translation>付款已确认</translation> </message> </context> <context> <name>PeerTableModel</name> <message> <source>User Agent</source> <translation>用户代理</translation> </message> <message> <source>Node/Service</source> <translation>节点/服务</translation> </message> <message> <source>NodeId</source> <translation>节点ID</translation> </message> <message> <source>Ping</source> <translation> </translation> </message> <message> <source>Sent</source> <translation>发送</translation> </message> <message> <source>Received</source> <translation>收到</translation> </message> </context> <context> <name>QObject</name> <message> <source>Amount</source> <translation>金额</translation> </message> <message> <source>Enter a Neuralinkbrainkeycoin address (e.g. 
%1)</source> <translation>请输入一个莱特币地址 (例如 %1)</translation> </message> <message> <source>%1 d</source> <translation>%1 天</translation> </message> <message> <source>%1 h</source> <translation>%1 小时</translation> </message> <message> <source>%1 m</source> <translation>%1 分钟</translation> </message> <message> <source>%1 s</source> <translation>%1 秒</translation> </message> <message> <source>None</source> <translation>无</translation> </message> <message> <source>N/A</source> <translation>不可用</translation> </message> <message> <source>%1 ms</source> <translation>%1 毫秒</translation> </message> <message> <source>%1 and %2</source> <translation>%1 和 %2</translation> </message> <message> <source>%1 B</source> <translation>%1 字节</translation> </message> <message> <source>%1 KB</source> <translation>%1 KB</translation> </message> <message> <source>%1 MB</source> <translation>%1 MB</translation> </message> <message> <source>%1 GB</source> <translation>%1 GB</translation> </message> <message> <source>%1 didn't yet exit safely...</source> <translation>%1 尚未安全退出</translation> </message> <message> <source>unknown</source> <translation>未知</translation> </message> </context> <context> <name>QObject::QObject</name> <message> <source>Error: Specified data directory "%1" does not exist.</source> <translation>错误:指定的数据目录“%1”不存在。</translation> </message> <message> <source>Error: %1</source> <translation>错误:%1</translation> </message> </context> <context> <name>QRImageWidget</name> <message> <source>&amp;Save Image...</source> <translation>保存图片(&amp;S)...</translation> </message> <message> <source>&amp;Copy Image</source> <translation>复制图片</translation> </message> <message> <source>Save QR Code</source> <translation>保存二维码</translation> </message> <message> <source>PNG Image (*.png)</source> <translation>PNG 图像(*.png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <source>N/A</source> <translation>不可用</translation> </message> <message> <source>Client version</source> <translation>客户端版本</translation> </message> <message> <source>&amp;Information</source> <translation>信息</translation> </message> <message> <source>Debug window</source> <translation>调试窗口</translation> </message> <message> <source>General</source> <translation>常规</translation> </message> <message> <source>Using BerkeleyDB version</source> <translation>使用的 BerkeleyDB 版本</translation> </message> <message> <source>Datadir</source> <translation>数据目录</translation> </message> <message> <source>Startup time</source> <translation>启动时间</translation> </message> <message> <source>Network</source> <translation>网络</translation> </message> <message> <source>Name</source> <translation>姓名</translation> </message> <message> <source>Number of connections</source> <translation>连接数</translation> </message> <message> <source>Block chain</source> <translation>数据链</translation> </message> <message> <source>Current number of blocks</source> <translation>当前数据块数量</translation> </message> <message> <source>Memory Pool</source> <translation>资金池</translation> </message> <message> <source>Current number of transactions</source> <translation>当前交易数量</translation> </message> <message> <source>Memory usage</source> <translation>内存使用</translation> </message> <message> <source>&amp;Reset</source> <translation>&amp;重启</translation> </message> <message> <source>Received</source> <translation>收到</translation> </message> <message> <source>Sent</source> <translation>发送</translation> </message> <message> <source>&amp;Peers</source> 
<translation>同伴(&amp;P)</translation> </message> <message> <source>Banned peers</source> <translation>节点黑名单</translation> </message> <message> <source>Select a peer to view detailed information.</source> <translation>选择节点查看详细信息。</translation> </message> <message> <source>Whitelisted</source> <translation>白名单</translation> </message> <message> <source>Direction</source> <translation>方向</translation> </message> <message> <source>Version</source> <translation>版本</translation> </message> <message> <source>Starting Block</source> <translation>正在启动数据块</translation> </message> <message> <source>Synced Headers</source> <translation>同步区块头</translation> </message> <message> <source>Synced Blocks</source> <translation>同步区块链</translation> </message> <message> <source>User Agent</source> <translation>用户代理</translation> </message> <message> <source>Decrease font size</source> <translation>缩小文字</translation> </message> <message> <source>Increase font size</source> <translation>放大文字</translation> </message> <message> <source>Services</source> <translation>服务</translation> </message> <message> <source>Ban Score</source> <translation>禁止得分</translation> </message> <message> <source>Connection Time</source> <translation>连接时间</translation> </message> <message> <source>Last Send</source> <translation>最后发送</translation> </message> <message> <source>Last Receive</source> <translation>最后接收</translation> </message> <message> <source>Ping Time</source> <translation>Ping 时间</translation> </message> <message> <source>The duration of a currently outstanding ping.</source> <translation>目前这一次 ping 已经过去的时间。</translation> </message> <message> <source>Ping Wait</source> <translation>Ping等待</translation> </message> <message> <source>Min Ping</source> <translation>最小Ping值</translation> </message> <message> <source>Time Offset</source> <translation>时间偏移</translation> </message> <message> <source>Last block time</source> <translation>上一数据块时间</translation> </message> <message> <source>&amp;Open</source> <translation>打开(&amp;O)</translation> </message> <message> <source>&amp;Console</source> <translation>控制台(&amp;C)</translation> </message> <message> <source>&amp;Network Traffic</source> <translation>网络流量(&amp;N)</translation> </message> <message> <source>Totals</source> <translation>总数</translation> </message> <message> <source>In:</source> <translation>输入:</translation> </message> <message> <source>Out:</source> <translation>输出:</translation> </message> <message> <source>Debug log file</source> <translation>调试日志文件</translation> </message> <message> <source>Clear console</source> <translation>清空控制台</translation> </message> <message> <source>1 &amp;hour</source> <translation>1 小时(&amp;H)</translation> </message> <message> <source>1 &amp;day</source> <translation>1 天(&amp;D)</translation> </message> <message> <source>1 &amp;week</source> <translation>1 周(&amp;W)</translation> </message> <message> <source>1 &amp;year</source> <translation>1 年(&amp;Y)</translation> </message> <message> <source>&amp;Disconnect</source> <translation>(&amp;D)断开</translation> </message> <message> <source>Ban for</source> <translation>禁止</translation> </message> <message> <source>&amp;Unban</source> <translation>重新允许</translation> </message> <message> <source>Welcome to the %1 RPC console.</source> <translation>欢迎使用 %1 的 RPC 控制台。</translation> </message> <message> <source>Use up and down arrows to navigate history, and %1 to clear screen.</source> <translation>使用上下方向键浏览历史, 以及 %1 清除屏幕。</translation> </message> <message> <source>Type %1 for an overview of 
available commands.</source> <translation>输入%1命令显示可用命令信息。</translation> </message> <message> <source>For more information on using this console type %1.</source> <translation>输入%1来取得使用这个控制台的更多信息。</translation> </message> <message> <source>WARNING: Scammers have been active, telling users to type commands here, stealing their wallet contents. Do not use this console without fully understanding the ramifications of a command.</source> <translation>警告: 已有骗子通过要求用户在此输入指令以盗取钱包。不要在没有完全理解命令规范时使用控制台。</translation> </message> <message> <source>Network activity disabled</source> <translation>网络活动已禁用</translation> </message> <message> <source>(node id: %1)</source> <translation>(节点ID: %1)</translation> </message> <message> <source>via %1</source> <translation>通过 %1</translation> </message> <message> <source>never</source> <translation>从未</translation> </message> <message> <source>Inbound</source> <translation>传入</translation> </message> <message> <source>Outbound</source> <translation>传出</translation> </message> <message> <source>Yes</source> <translation>是</translation> </message> <message> <source>No</source> <translation>否</translation> </message> <message> <source>Unknown</source> <translation>未知</translation> </message> </context> <context> <name>ReceiveCoinsDialog</name> <message> <source>&amp;Amount:</source> <translation>总额(&amp;A):</translation> </message> <message> <source>&amp;Label:</source> <translation>标签(&amp;L):</translation> </message> <message> <source>&amp;Message:</source> <translation>消息(&amp;M):</translation> </message> <message> <source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the Neuralinkbrainkeycoin network.</source> <translation>可在付款请求上备注一条信息,在打开付款请求时可以看到。注意:该消息不是通过莱特币网络传送。</translation> </message> <message> <source>An optional label to associate with the new receiving address.</source> <translation>可为新建的收款地址添加一个标签。</translation> </message> <message> <source>Use this form to request payments. All fields are &lt;b&gt;optional&lt;/b&gt;.</source> <translation>使用此表单要求付款。所有字段都是&lt;b&gt;可选&lt;/b&gt;。</translation> </message> <message> <source>An optional amount to request. 
Leave this empty or zero to not request a specific amount.</source> <translation>可选的请求金额。留空或填零为不要求具体金额。</translation> </message> <message> <source>Clear all fields of the form.</source> <translation>清除此表单的所有字段。</translation> </message> <message> <source>Clear</source> <translation>清除</translation> </message> <message> <source>Generate native segwit (Bech32) address</source> <translation>生成本地分离见证 (Bech32)地址</translation> </message> <message> <source>Requested payments history</source> <translation>请求付款的历史</translation> </message> <message> <source>&amp;Request payment</source> <translation>请求付款(&amp;R)</translation> </message> <message> <source>Show the selected request (does the same as double clicking an entry)</source> <translation>显示选中的请求 (双击也可以显示)</translation> </message> <message> <source>Show</source> <translation>显示</translation> </message> <message> <source>Remove the selected entries from the list</source> <translation>从列表中移除选中的条目</translation> </message> <message> <source>Remove</source> <translation>移除</translation> </message> <message> <source>Copy URI</source> <translation>复制URI</translation> </message> <message> <source>Copy label</source> <translation>复制标签</translation> </message> <message> <source>Copy message</source> <translation>复制消息</translation> </message> <message> <source>Copy amount</source> <translation>复制金额</translation> </message> </context> <context> <name>ReceiveRequestDialog</name> <message> <source>QR Code</source> <translation>二维码</translation> </message> <message> <source>Copy &amp;URI</source> <translation>复制 URI(&amp;U)</translation> </message> <message> <source>Copy &amp;Address</source> <translation>复制地址(&amp;A)</translation> </message> <message> <source>&amp;Save Image...</source> <translation>保存图片(&amp;S)...</translation> </message> <message> <source>Request payment to %1</source> <translation>请求付款到 %1</translation> </message> <message> <source>Payment information</source> <translation>付款信息</translation> </message> <message> <source>URI</source> <translation>URI</translation> </message> <message> <source>Address</source> <translation>地址</translation> </message> <message> <source>Amount</source> <translation>金额</translation> </message> <message> <source>Label</source> <translation>标签</translation> </message> <message> <source>Message</source> <translation>消息</translation> </message> <message> <source>Wallet</source> <translation>钱包</translation> </message> <message> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation>URI 太长,请试着精简标签或消息文本。</translation> </message> <message> <source>Error encoding URI into QR Code.</source> <translation>把 URI 编码成二维码时发生错误。</translation> </message> </context> <context> <name>RecentRequestsTableModel</name> <message> <source>Date</source> <translation>日期</translation> </message> <message> <source>Label</source> <translation>标签</translation> </message> <message> <source>Message</source> <translation>消息</translation> </message> <message> <source>(no label)</source> <translation>(无标签)</translation> </message> <message> <source>(no message)</source> <translation>(无消息)</translation> </message> <message> <source>(no amount requested)</source> <translation>(无请求金额)</translation> </message> <message> <source>Requested</source> <translation>总额</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <source>Send Coins</source> <translation>发送</translation> </message> <message> <source>Coin Control Features</source> <translation>交易源地址控制功能</translation> </message> 
<message> <source>Inputs...</source> <translation>输入...</translation> </message> <message> <source>automatically selected</source> <translation>自动选择</translation> </message> <message> <source>Insufficient funds!</source> <translation>存款不足!</translation> </message> <message> <source>Quantity:</source> <translation>总量:</translation> </message> <message> <source>Bytes:</source> <translation>字节:</translation> </message> <message> <source>Amount:</source> <translation>金额:</translation> </message> <message> <source>Fee:</source> <translation>费用:</translation> </message> <message> <source>After Fee:</source> <translation>加上交易费用后:</translation> </message> <message> <source>Change:</source> <translation>变更 : </translation> </message> <message> <source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source> <translation>如果激活该选项,但是零钱地址用光或者非法,将会新生成零钱地址,转入零钱。</translation> </message> <message> <source>Custom change address</source> <translation>自定义零钱地址</translation> </message> <message> <source>Transaction Fee:</source> <translation>交易费用:</translation> </message> <message> <source>Choose...</source> <translation>选择... </translation> </message> <message> <source>Using the fallbackfee can result in sending a transaction that will take several hours or days (or never) to confirm. Consider choosing your fee manually or wait until you have validated the complete chain.</source> <translation>如果使用备用交易费设置,有可能会导致交易经过几个小时、几天(甚至永远)无法被确认。请考虑手动选择交易费,或等待整个链完成验证。</translation> </message> <message> <source>Warning: Fee estimation is currently not possible.</source> <translation>警告: 目前无法进行交易费估计。</translation> </message> <message> <source>collapse fee-settings</source> <translation>收起 费用设置 </translation> </message> <message> <source>per kilobyte</source> <translation>每kb</translation> </message> <message> <source>Hide</source> <translation>隐藏</translation> </message> <message> <source>Paying only the minimum fee is just fine as long as there is less transaction volume than space in the blocks. But be aware that this can end up in a never confirming transaction once there is more demand for neuralinkbrainkeycoin transactions than the network can process.</source> <translation>交易量小时允许只支付最小交易费。但是请注意,当交易量大到超出网络可处理时您的交易可能永远无法确认。</translation> </message> <message> <source>(read the tooltip)</source> <translation>(请注意提示信息)</translation> </message> <message> <source>Recommended:</source> <translation>推荐:</translation> </message> <message> <source>Custom:</source> <translation>自定义:</translation> </message> <message> <source>(Smart fee not initialized yet. This usually takes a few blocks...)</source> <translation>(智能交易费用 尚未初始化。 需要再下载一些数据块...)</translation> </message> <message> <source>Send to multiple recipients at once</source> <translation>一次发送给多个接收者</translation> </message> <message> <source>Add &amp;Recipient</source> <translation>添加收款人(&amp;R)</translation> </message> <message> <source>Clear all fields of the form.</source> <translation>清除此表单的所有字段。</translation> </message> <message> <source>Dust:</source> <translation>小额:</translation> </message> <message> <source>Confirmation time target:</source> <translation>确认时间目标:</translation> </message> <message> <source>Enable Replace-By-Fee</source> <translation>启用手续费追加</translation> </message> <message> <source>With Replace-By-Fee (BIP-125) you can increase a transaction's fee after it is sent. 
Without this, a higher fee may be recommended to compensate for increased transaction delay risk.</source> <translation>手续费追加(Replace-By-Fee,BIP-125)可以让你在送出交易后才来提高手续费。不用这个功能的话,建议付比较高的手续费来降低交易延迟的风险。</translation> </message> <message> <source>Clear &amp;All</source> <translation>清除所有(&amp;A)</translation> </message> <message> <source>Balance:</source> <translation>余额:</translation> </message> <message> <source>Confirm the send action</source> <translation>确认发送货币</translation> </message> <message> <source>S&amp;end</source> <translation>发送(&amp;E)</translation> </message> <message> <source>Copy quantity</source> <translation>复制数目</translation> </message> <message> <source>Copy amount</source> <translation>复制金额</translation> </message> <message> <source>Copy fee</source> <translation>复制手续费</translation> </message> <message> <source>Copy after fee</source> <translation>复制计费后金额</translation> </message> <message> <source>Copy bytes</source> <translation>复制字节数</translation> </message> <message> <source>Copy dust</source> <translation>复制零散金额</translation> </message> <message> <source>Copy change</source> <translation>复制找零金额</translation> </message> <message> <source>%1 (%2 blocks)</source> <translation>%1 (%2个块)</translation> </message> <message> <source>%1 to %2</source> <translation>%1 到 %2</translation> </message> <message> <source>Are you sure you want to send?</source> <translation>您确定要发出吗?</translation> </message> <message> <source>or</source> <translation>或</translation> </message> <message> <source>You can increase the fee later (signals Replace-By-Fee, BIP-125).</source> <translation>你可以之后再提高手续费(有BIP-125手续费追加的标记)</translation> </message> <message> <source>Transaction fee</source> <translation>交易费用</translation> </message> <message> <source>Not signalling Replace-By-Fee, BIP-125.</source> <translation>没有BIP-125手续费追加的标记。</translation> </message> <message> <source>Confirm send coins</source> <translation>确认发送</translation> </message> <message> <source>The recipient address is not valid. 
Please recheck.</source> <translation>接收人地址无效。请重新检查。</translation> </message> <message> <source>The amount to pay must be larger than 0.</source> <translation>支付金额必须大于0。</translation> </message> <message> <source>The amount exceeds your balance.</source> <translation>金额超出您的余额。</translation> </message> <message> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>计入 %1 交易费后的金额超出您的余额。</translation> </message> <message> <source>Duplicate address found: addresses should only be used once each.</source> <translation>发现重复地址:每个地址应该只使用一次。</translation> </message> <message> <source>Transaction creation failed!</source> <translation>交易创建失败!</translation> </message> <message> <source>The transaction was rejected with the following reason: %1</source> <translation>交易因以下原因拒绝:%1</translation> </message> <message> <source>A fee higher than %1 is considered an absurdly high fee.</source> <translation>交易费一般不应超过 %1。</translation> </message> <message> <source>Payment request expired.</source> <translation>付款请求已过期。</translation> </message> <message> <source>Pay only the required fee of %1</source> <translation>只支付必要费用 %1</translation> </message> <message> <source>Warning: Invalid Neuralinkbrainkeycoin address</source> <translation>警告: 莱特币地址无效</translation> </message> <message> <source>Warning: Unknown change address</source> <translation>警告:未知的更改地址</translation> </message> <message> <source>Confirm custom change address</source> <translation>确认用户找零地址</translation> </message> <message> <source>The address you selected for change is not part of this wallet. Any or all funds in your wallet may be sent to this address. Are you sure?</source> <translation>你选择的找零地址未被包含在本钱包中,你钱包中的部分或全部金额将被发送至该地址。你确定要这样做吗?</translation> </message> <message> <source>(no label)</source> <translation>(无标签)</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <source>A&amp;mount:</source> <translation>金额(&amp;M)</translation> </message> <message> <source>Pay &amp;To:</source> <translation>付给(&amp;T):</translation> </message> <message> <source>&amp;Label:</source> <translation>标签(&amp;L):</translation> </message> <message> <source>Choose previously used address</source> <translation>选择以前用过的地址</translation> </message> <message> <source>This is a normal payment.</source> <translation>这是笔正常的支付。</translation> </message> <message> <source>The Neuralinkbrainkeycoin address to send the payment to</source> <translation>付款目的地址</translation> </message> <message> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <source>Paste address from clipboard</source> <translation>从剪贴板粘贴地址</translation> </message> <message> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <source>Remove this entry</source> <translation>移除此项</translation> </message> <message> <source>The fee will be deducted from the amount being sent. The recipient will receive less neuralinkbrainkeycoins than you enter in the amount field. 
If multiple recipients are selected, the fee is split equally.</source> <translation>交易费将从发送总额中扣除。接收人将收到比您在金额框中输入的更少的莱特币。如果选中了多个收件人,交易费平分。</translation> </message> <message> <source>S&amp;ubtract fee from amount</source> <translation>从金额中减去交易费(&amp;U)</translation> </message> <message> <source>Use available balance</source> <translation>使用全部可用余额</translation> </message> <message> <source>Message:</source> <translation>消息:</translation> </message> <message> <source>This is an unauthenticated payment request.</source> <translation>这是一个未经验证的支付请求。</translation> </message> <message> <source>This is an authenticated payment request.</source> <translation>这是一个已经验证的支付请求。</translation> </message> <message> <source>Enter a label for this address to add it to the list of used addresses</source> <translation>请为此地址输入一个标签以将它加入用过的地址列表</translation> </message> <message> <source>A message that was attached to the neuralinkbrainkeycoin: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the Neuralinkbrainkeycoin network.</source> <translation>neuralinkbrainkeycoin:URI 附带的备注信息,将会和交易一起存储,备查。 注意:该消息不会通过莱特币网络传输。</translation> </message> <message> <source>Pay To:</source> <translation>支付给:</translation> </message> <message> <source>Memo:</source> <translation>便条:</translation> </message> <message> <source>Enter a label for this address to add it to your address book</source> <translation>为这个地址输入一个标签,以便将它添加到您的地址簿</translation> </message> </context> <context> <name>SendConfirmationDialog</name> <message> <source>Yes</source> <translation>是</translation> </message> </context> <context> <name>ShutdownWindow</name> <message> <source>%1 is shutting down...</source> <translation>正在关闭 %1 ...</translation> </message> <message> <source>Do not shut down the computer until this window disappears.</source> <translation>在此窗口消失前不要关闭计算机。</translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <source>Signatures - Sign / Verify a Message</source> <translation>签名 - 为消息签名/验证签名消息</translation> </message> <message> <source>&amp;Sign Message</source> <translation>签名消息(&amp;S)</translation> </message> <message> <source>You can sign messages/agreements with your addresses to prove you can receive neuralinkbrainkeycoins sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. 
Only sign fully-detailed statements you agree to.</source> <translation>您可以用你的地址对消息/协议进行签名,以证明您可以接收发送到该地址的莱特币。注意不要对任何模棱两可或者随机的消息进行签名,以免遭受钓鱼式攻击。请确保消息内容准确的表达了您的真实意愿。</translation> </message> <message> <source>The Neuralinkbrainkeycoin address to sign the message with</source> <translation>用来对消息签名的地址 </translation> </message> <message> <source>Choose previously used address</source> <translation>选择以前用过的地址</translation> </message> <message> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <source>Paste address from clipboard</source> <translation>从剪贴板粘贴地址</translation> </message> <message> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <source>Enter the message you want to sign here</source> <translation>请输入您要发送的签名消息</translation> </message> <message> <source>Signature</source> <translation>签名</translation> </message> <message> <source>Copy the current signature to the system clipboard</source> <translation>复制当前签名至剪切板</translation> </message> <message> <source>Sign the message to prove you own this Neuralinkbrainkeycoin address</source> <translation>签名消息,证明这个地址属于您。</translation> </message> <message> <source>Sign &amp;Message</source> <translation>消息签名(&amp;M)</translation> </message> <message> <source>Reset all sign message fields</source> <translation>清空所有签名消息栏</translation> </message> <message> <source>Clear &amp;All</source> <translation>清除所有(&amp;A)</translation> </message> <message> <source>&amp;Verify Message</source> <translation>验证消息(&amp;V)</translation> </message> <message> <source>Enter the receiver's address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack. 
Note that this only proves the signing party receives with the address, it cannot prove sendership of any transaction!</source> <translation>请在下面输入接收者地址、消息(确保换行符、空格符、制表符等完全相同)和签名以验证消息。请仔细核对签名信息,以提防中间人攻击。请注意,这只是证明接收方签名的地址,它不能证明任何交易!</translation> </message> <message> <source>The Neuralinkbrainkeycoin address the message was signed with</source> <translation>消息使用的签名地址</translation> </message> <message> <source>Verify the message to ensure it was signed with the specified Neuralinkbrainkeycoin address</source> <translation>验证消息,确保消息是由指定的莱特币地址签名过的。</translation> </message> <message> <source>Verify &amp;Message</source> <translation>验证消息签名(&amp;M)</translation> </message> <message> <source>Reset all verify message fields</source> <translation>清空所有验证消息栏</translation> </message> <message> <source>Click "Sign Message" to generate signature</source> <translation>单击“签名消息“产生签名。</translation> </message> <message> <source>The entered address is invalid.</source> <translation>输入的地址无效。</translation> </message> <message> <source>Please check the address and try again.</source> <translation>请检查地址后重试。</translation> </message> <message> <source>The entered address does not refer to a key.</source> <translation>输入的地址没有关联的公私钥对。</translation> </message> <message> <source>Wallet unlock was cancelled.</source> <translation>钱包解锁动作取消。</translation> </message> <message> <source>Private key for the entered address is not available.</source> <translation>找不到输入地址关联的私钥。</translation> </message> <message> <source>Message signing failed.</source> <translation>消息签名失败。</translation> </message> <message> <source>Message signed.</source> <translation>消息已签名。</translation> </message> <message> <source>The signature could not be decoded.</source> <translation>签名无法解码。</translation> </message> <message> <source>Please check the signature and try again.</source> <translation>请检查签名后重试。</translation> </message> <message> <source>The signature did not match the message digest.</source> <translation>签名与消息摘要不匹配。</translation> </message> <message> <source>Message verification failed.</source> <translation>消息验证失败。</translation> </message> <message> <source>Message verified.</source> <translation>消息验证成功。</translation> </message> </context> <context> <name>SplashScreen</name> <message> <source>[testnet]</source> <translation>[测试网络]</translation> </message> </context> <context> <name>TrafficGraphWidget</name> <message> <source>KB/s</source> <translation>KB/s</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <source>Open until %1</source> <translation>至 %1 个数据块时开启</translation> </message> <message> <source>conflicted with a transaction with %1 confirmations</source> <translation>与一个有 %1 个确认的交易冲突</translation> </message> <message> <source>0/unconfirmed, %1</source> <translation>0/未确认,%1</translation> </message> <message> <source>in memory pool</source> <translation>在内存池中</translation> </message> <message> <source>not in memory pool</source> <translation>不在内存池中</translation> </message> <message> <source>abandoned</source> <translation>已抛弃</translation> </message> <message> <source>%1/unconfirmed</source> <translation>%1/未确认</translation> </message> <message> <source>%1 confirmations</source> <translation>%1 个确认</translation> </message> <message> <source>Status</source> <translation>状态</translation> </message> <message> <source>Date</source> <translation>日期</translation> </message> <message> <source>Source</source> <translation>源</translation> </message> <message> <source>Generated</source> 
<translation>生成</translation> </message> <message> <source>From</source> <translation>来自</translation> </message> <message> <source>unknown</source> <translation>未知</translation> </message> <message> <source>To</source> <translation>到</translation> </message> <message> <source>own address</source> <translation>自己的地址</translation> </message> <message> <source>watch-only</source> <translation>观察地址(watch-only) </translation> </message> <message> <source>label</source> <translation>标签</translation> </message> <message> <source>Credit</source> <translation>收入</translation> </message> <message> <source>not accepted</source> <translation>未被接受</translation> </message> <message> <source>Debit</source> <translation>支出</translation> </message> <message> <source>Total debit</source> <translation>总支出</translation> </message> <message> <source>Total credit</source> <translation>总收入</translation> </message> <message> <source>Transaction fee</source> <translation>交易费用</translation> </message> <message> <source>Net amount</source> <translation>净额</translation> </message> <message> <source>Message</source> <translation>消息</translation> </message> <message> <source>Comment</source> <translation>备注</translation> </message> <message> <source>Transaction ID</source> <translation>交易 ID</translation> </message> <message> <source>Transaction total size</source> <translation>交易总大小</translation> </message> <message> <source>Output index</source> <translation>输出索引</translation> </message> <message> <source>Merchant</source> <translation>商家</translation> </message> <message> <source>Generated coins must mature %1 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation>生成的莱特币在可以使用前必须有 %1 个成熟的区块。当您生成了此区块后,它将被广播到网络中以加入区块链。如果它未成功进入区块链,其状态将变更为“不接受”并且不可使用。这可能偶尔会发生,如果另一个节点比你早几秒钟成功生成一个区块。</translation> </message> <message> <source>Debug information</source> <translation>调试信息</translation> </message> <message> <source>Transaction</source> <translation>交易</translation> </message> <message> <source>Inputs</source> <translation>输入</translation> </message> <message> <source>Amount</source> <translation>金额</translation> </message> <message> <source>true</source> <translation>是</translation> </message> <message> <source>false</source> <translation>否</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <source>This pane shows a detailed description of the transaction</source> <translation>当前面板显示了交易的详细信息</translation> </message> <message> <source>Details for %1</source> <translation>%1 详情</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <source>Date</source> <translation>日期</translation> </message> <message> <source>Type</source> <translation>种类</translation> </message> <message> <source>Label</source> <translation>标签</translation> </message> <message> <source>Open until %1</source> <translation>至 %1 个数据块时开启</translation> </message> <message> <source>Unconfirmed</source> <translation>未确认</translation> </message> <message> <source>Abandoned</source> <translation>已丢弃</translation> </message> <message> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation>确认中 (推荐 %2个确认,已经有 %1个确认)</translation> </message> <message> <source>Confirmed (%1 confirmations)</source> <translation>已确认 (%1 条确认信息)</translation> </message> <message> <source>Conflicted</source> <translation>冲突的</translation> </message> <message> <source>Immature (%1 confirmations, will be available after %2)</source> <translation>未成熟 (%1 个确认,将在 %2 个后可用)</translation> </message> <message> <source>Generated but not accepted</source> <translation>已生成但未被接受</translation> </message> <message> <source>Received with</source> <translation>收款</translation> </message> <message> <source>Received from</source> <translation>收款来自</translation> </message> <message> <source>Sent to</source> <translation>付款</translation> </message> <message> <source>Payment to yourself</source> <translation>付款给自己</translation> </message> <message> <source>Mined</source> <translation>挖矿所得</translation> </message> <message> <source>watch-only</source> <translation>观察地址(watch-only) </translation> </message> <message> <source>(n/a)</source> <translation>(不可用)</translation> </message> <message> <source>(no label)</source> <translation>(无标签)</translation> </message> <message> <source>Transaction status. 
Hover over this field to show number of confirmations.</source> <translation>交易状态。 鼠标移到此区域可显示确认项数量。</translation> </message> <message> <source>Date and time that the transaction was received.</source> <translation>交易被接收的时间和日期。</translation> </message> <message> <source>Type of transaction.</source> <translation>交易类型。</translation> </message> <message> <source>Whether or not a watch-only address is involved in this transaction.</source> <translation>该交易中是否涉及 观察地址(watch-only address)。</translation> </message> <message> <source>User-defined intent/purpose of the transaction.</source> <translation>用户定义的该交易的意图/目的。</translation> </message> <message> <source>Amount removed from or added to balance.</source> <translation>从余额添加或移除的金额。</translation> </message> </context> <context> <name>TransactionView</name> <message> <source>All</source> <translation>全部</translation> </message> <message> <source>Today</source> <translation>今天</translation> </message> <message> <source>This week</source> <translation>这星期</translation> </message> <message> <source>This month</source> <translation>这个月</translation> </message> <message> <source>Last month</source> <translation>上个月</translation> </message> <message> <source>This year</source> <translation>今年</translation> </message> <message> <source>Range...</source> <translation>指定范围...</translation> </message> <message> <source>Received with</source> <translation>收款</translation> </message> <message> <source>Sent to</source> <translation>付款</translation> </message> <message> <source>To yourself</source> <translation>给自己</translation> </message> <message> <source>Mined</source> <translation>挖矿所得</translation> </message> <message> <source>Other</source> <translation>其它</translation> </message> <message> <source>Enter address, transaction id, or label to search</source> <translation>输入地址、交易识别码或标签进行搜索</translation> </message> <message> <source>Min amount</source> <translation>最小金额</translation> </message> <message> <source>Abandon transaction</source> <translation>放弃交易</translation> </message> <message> <source>Increase transaction fee</source> <translation>增加交易费</translation> </message> <message> <source>Copy address</source> <translation>复制地址</translation> </message> <message> <source>Copy label</source> <translation>复制标签</translation> </message> <message> <source>Copy amount</source> <translation>复制金额</translation> </message> <message> <source>Copy transaction ID</source> <translation>复制交易识别码</translation> </message> <message> <source>Copy raw transaction</source> <translation>拷贝原始交易</translation> </message> <message> <source>Copy full transaction details</source> <translation>复制所有交易详情</translation> </message> <message> <source>Edit label</source> <translation>编辑标签</translation> </message> <message> <source>Show transaction details</source> <translation>显示交易详情</translation> </message> <message> <source>Export Transaction History</source> <translation>导出交易历史</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>逗号分隔文件 (*.csv)</translation> </message> <message> <source>Confirmed</source> <translation>已确认</translation> </message> <message> <source>Watch-only</source> <translation>观察地址(Watch-only) </translation> </message> <message> <source>Date</source> <translation>日期</translation> </message> <message> <source>Type</source> <translation>种类</translation> </message> <message> <source>Label</source> <translation>标签</translation> </message> <message> <source>Address</source> <translation>地址</translation> </message> <message> 
<source>ID</source> <translation>ID</translation> </message> <message> <source>Exporting Failed</source> <translation>导出失败</translation> </message> <message> <source>There was an error trying to save the transaction history to %1.</source> <translation>尝试保存交易历史 %1 时发生了错误。</translation> </message> <message> <source>Exporting Successful</source> <translation>导出成功</translation> </message> <message> <source>The transaction history was successfully saved to %1.</source> <translation>交易历史已成功保存到 %1。</translation> </message> <message> <source>Range:</source> <translation>范围:</translation> </message> <message> <source>to</source> <translation>到</translation> </message> </context> <context> <name>UnitDisplayStatusBarControl</name> <message> <source>Unit to show amounts in. Click to select another unit.</source> <translation>金额单位。单击选择别的单位。</translation> </message> </context> <context> <name>WalletFrame</name> <message> <source>No wallet has been loaded.</source> <translation>没有载入钱包。</translation> </message> </context> <context> <name>WalletModel</name> <message> <source>Send Coins</source> <translation>发送</translation> </message> <message> <source>Fee bump error</source> <translation>手续费提升失败</translation> </message> <message> <source>Increasing transaction fee failed</source> <translation>增加交易费失败</translation> </message> <message> <source>Do you want to increase the fee?</source> <translation>你是否愿意增加交易费?</translation> </message> <message> <source>Current fee:</source> <translation>当前交易费:</translation> </message> <message> <source>Increase:</source> <translation>增加量:</translation> </message> <message> <source>New fee:</source> <translation>新交易费:</translation> </message> <message> <source>Confirm fee bump</source> <translation>确认手续费提升</translation> </message> <message> <source>Can't sign transaction.</source> <translation>无法签署交易。</translation> </message> <message> <source>Could not commit transaction</source> <translation>无法提交交易</translation> </message> </context> <context> <name>WalletView</name> <message> <source>&amp;Export</source> <translation>导出(&amp;E)</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation>将当前标签页数据导出到文件</translation> </message> <message> <source>Backup Wallet</source> <translation>备份钱包</translation> </message> <message> <source>Wallet Data (*.dat)</source> <translation>钱包文件(*.dat)</translation> </message> <message> <source>Backup Failed</source> <translation>备份失败</translation> </message> <message> <source>There was an error trying to save the wallet data to %1.</source> <translation>尝试保存钱包数据至 %1 时发生了错误。</translation> </message> <message> <source>Backup Successful</source> <translation>备份成功</translation> </message> <message> <source>The wallet data was successfully saved to %1.</source> <translation>钱包数据成功保存至 %1。</translation> </message> </context> <context> <name>bitcoin-core</name> <message> <source>Distributed under the MIT software license, see the accompanying file %s or %s</source> <translation>在MIT协议下分发,参见附带的 %s 文件或 %s</translation> </message> <message> <source>Prune configured below the minimum of %d MiB. Please use a higher number.</source> <translation>修剪值被设置为低于最小值%d MiB,请使用更大的数值。</translation> </message> <message> <source>Prune: last wallet synchronisation goes beyond pruned data. 
You need to -reindex (download the whole blockchain again in case of pruned node)</source> <translation>修剪:最后的钱包同步超过了修剪的数据。你需要通过 -reindex (重新下载整个区块链以防修剪节点)</translation> </message> <message> <source>Rescans are not possible in pruned mode. You will need to use -reindex which will download the whole blockchain again.</source> <translation>无法在开启修剪的状态下重扫描,请使用 -reindex重新下载完整的区块链。</translation> </message> <message> <source>Error: A fatal internal error occurred, see debug.log for details</source> <translation>错误:发生了致命的内部错误,详情见 debug.log 文件</translation> </message> <message> <source>Pruning blockstore...</source> <translation>正在修剪区块存储...</translation> </message> <message> <source>Unable to start HTTP server. See debug log for details.</source> <translation>无法启动HTTP服务,查看日志获取更多信息</translation> </message> <message> <source>Neuralinkbrainkeycoin Core</source> <translation>Neuralinkbrainkeycoin Core</translation> </message> <message> <source>The %s developers</source> <translation>%s 开发人员</translation> </message> <message> <source>Cannot obtain a lock on data directory %s. %s is probably already running.</source> <translation>无法锁定数据目录 %s。%s 可能已经在运行。</translation> </message> <message> <source>Cannot provide specific connections and have addrman find outgoing connections at the same.</source> <translation>无法同时指定特定连接地址以及自动寻找连接。</translation> </message> <message> <source>Error reading %s! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>读取 %s 时发生错误!所有的密钥都可以正确读取,但是交易记录或地址簿数据可能已经丢失或出错。</translation> </message> <message> <source>Please check that your computer's date and time are correct! If your clock is wrong, %s will not work properly.</source> <translation>请检查电脑的日期时间设置是否正确!时间错误可能会导致 %s 运行异常。</translation> </message> <message> <source>Please contribute if you find %s useful. Visit %s for further information about the software.</source> <translation>如果你认为%s对你比较有用的话,请对我们进行一些捐赠支持。请访问%s网站来获取有关这个软件的更多信息。</translation> </message> <message> <source>The block database contains a block which appears to be from the future. This may be due to your computer's date and time being set incorrectly. Only rebuild the block database if you are sure that your computer's date and time are correct</source> <translation>区块数据库包含未来的交易,这可能是由本机错误的日期时间引起。若确认本机日期时间正确,请重新建立区块数据库。</translation> </message> <message> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation>这是测试用的预发布版本 - 请谨慎使用 - 不要用来挖矿,或者在正式商用环境下使用</translation> </message> <message> <source>This is the transaction fee you may discard if change is smaller than dust at this level</source> <translation>如果对你的交易量来说,消耗的手续费微乎其微,那么这笔手续费你或许可以忽略它。</translation> </message> <message> <source>Unable to replay blocks. You will need to rebuild the database using -reindex-chainstate.</source> <translation>没办法重算区块。你需要先用-reindex-chainstate参数来重建数据库。</translation> </message> <message> <source>Unable to rewind the database to a pre-fork state. You will need to redownload the blockchain</source> <translation>没办法将数据库倒转回分叉前的状态。必须要重新下载区块链。</translation> </message> <message> <source>Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.</source> <translation>警告:网络似乎并不完全同意!有些矿工似乎遇到了问题。</translation> </message> <message> <source>Warning: We do not appear to fully agree with our peers! 
You may need to upgrade, or other nodes may need to upgrade.</source> <translation>警告:我们的同行似乎不完全同意!您可能需要升级,或者其他节点可能需要升级。</translation> </message> <message> <source>%d of last 100 blocks have unexpected version</source> <translation>最后100个区块中的%d个包含未知的版本号</translation> </message> <message> <source>%s corrupt, salvage failed</source> <translation>%s 已损坏,抢救备份失败</translation> </message> <message> <source>-maxmempool must be at least %d MB</source> <translation>-maxmempool 最小为%d MB</translation> </message> <message> <source>Cannot resolve -%s address: '%s'</source> <translation>无法解析 - %s 地址: '%s'</translation> </message> <message> <source>Change index out of range</source> <translation>修改索引超过范围</translation> </message> <message> <source>Copyright (C) %i-%i</source> <translation>版权所有 (C) %i-%i</translation> </message> <message> <source>Corrupted block database detected</source> <translation>检测发现数据块数据库损坏。请使用 -reindex参数重启客户端。</translation> </message> <message> <source>Do you want to rebuild the block database now?</source> <translation>你想现在就重建块数据库吗?</translation> </message> <message> <source>Error creating %s: You can't create non-HD wallets with this version.</source> <translation>生成%s发生错误:这个版本不能用来产生非HD钱包。</translation> </message> <message> <source>Error initializing block database</source> <translation>初始化数据块数据库出错</translation> </message> <message> <source>Error initializing wallet database environment %s!</source> <translation>初始化钱包数据库环境错误 %s!</translation> </message> <message> <source>Error loading %s</source> <translation>载入 %s 时发生错误</translation> </message> <message> <source>Error loading %s: Wallet corrupted</source> <translation>%s 加载出错:钱包损坏</translation> </message> <message> <source>Error loading %s: Wallet requires newer version of %s</source> <translation>%s 加载错误:请升级到最新版 %s</translation> </message> <message> <source>Error loading block database</source> <translation>导入数据块数据库出错</translation> </message> <message> <source>Error opening block database</source> <translation>导入数据块数据库出错</translation> </message> <message> <source>Error: Disk space is low!</source> <translation>错误:磁盘剩余空间低!</translation> </message> <message> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>监听端口失败。请使用 -listen=0 参数。</translation> </message> <message> <source>Failed to rescan the wallet during initialization</source> <translation>初始化时重新扫描钱包失败了</translation> </message> <message> <source>Importing...</source> <translation>导入中...</translation> </message> <message> <source>Incorrect or no genesis block found. 
Wrong datadir for network?</source> <translation>不正确或没有找到起源区块。网络错误?</translation> </message> <message> <source>Invalid amount for -%s=&lt;amount&gt;: '%s'</source> <translation>无效的金额 -%s=&lt;amount&gt;: '%s'</translation> </message> <message> <source>Invalid amount for -discardfee=&lt;amount&gt;: '%s'</source> <translation>无效的金额 -discardfee=&lt;amount&gt;: '%s'</translation> </message> <message> <source>Invalid amount for -fallbackfee=&lt;amount&gt;: '%s'</source> <translation>-fallbackfee 的无效数额=&lt;amount&gt;: '%s'</translation> </message> <message> <source>Loading P2P addresses...</source> <translation>正在加载P2P地址...</translation> </message> <message> <source>Loading banlist...</source> <translation>正在加载黑名单...</translation> </message> <message> <source>Not enough file descriptors available.</source> <translation>没有足够的文件描述符可用。</translation> </message> <message> <source>Prune cannot be configured with a negative value.</source> <translation>修剪不能配置一个负数。</translation> </message> <message> <source>Prune mode is incompatible with -txindex.</source> <translation>修剪模式与 -txindex 不兼容。</translation> </message> <message> <source>Replaying blocks...</source> <translation>正在对区块进行重算…</translation> </message> <message> <source>Rewinding blocks...</source> <translation>回退区块</translation> </message> <message> <source>The source code is available from %s.</source> <translation>源代码可以在 %s 获得。</translation> </message> <message> <source>Transaction fee and change calculation failed</source> <translation>计算交易手续费和找零失败了</translation> </message> <message> <source>Unable to bind to %s on this computer. %s is probably already running.</source> <translation>无法在本机绑定 %s 端口。%s 可能已经在运行。</translation> </message> <message> <source>Unsupported argument -benchmark ignored, use -debug=bench.</source> <translation>忽略不支持的选项 -benchmark,使用 -debug=bench</translation> </message> <message> <source>Unsupported argument -debugnet ignored, use -debug=net.</source> <translation>忽略不支持的选项 -debugnet,使用 -debug=net。</translation> </message> <message> <source>Unsupported argument -tor found, use -onion.</source> <translation>忽略不支持的选项 -tor,使用 -oinon</translation> </message> <message> <source>Unsupported logging category %s=%s.</source> <translation>不支持的日志分类 %s=%s.</translation> </message> <message> <source>Upgrading UTXO database</source> <translation>升级UTXO数据库</translation> </message> <message> <source>User Agent comment (%s) contains unsafe characters.</source> <translation>用户代理评论(%s)包含不安全的字符。</translation> </message> <message> <source>Verifying blocks...</source> <translation>正在验证区块...</translation> </message> <message> <source>Wallet needed to be rewritten: restart %s to complete</source> <translation>钱包需要被重写:请重新启动%s来完成</translation> </message> <message> <source>Error: Listening for incoming connections failed (listen returned error %s)</source> <translation>错误:监听外部连接失败 (监听返回错误 %s) </translation> </message> <message> <source>Invalid amount for -maxtxfee=&lt;amount&gt;: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions)</source> <translation>-maxtxfee=&lt;amount&gt;: '%s' 的金额无效(交易费至少为 %s,以免交易滞留过久)</translation> </message> <message> <source>The transaction amount is too small to send after the fee has been deducted</source> <translation>在交易费被扣除后发送的交易金额太小</translation> </message> <message> <source>You need to rebuild the database using -reindex to go back to unpruned mode. 
This will redownload the entire blockchain</source> <translation>您需要使用 -reindex 重新构建数据库以返回未修剪的模式。这将重新下载整个区块链</translation> </message> <message> <source>Error loading %s: You can't disable HD on an already existing HD wallet</source> <translation>加载%s发生错误:不能对已存在的HD钱包停用HD功能。</translation> </message> <message> <source>Error reading from database, shutting down.</source> <translation>读取数据库出错,关闭中。</translation> </message> <message> <source>Error upgrading chainstate database</source> <translation>升级链状态数据库出错</translation> </message> <message> <source>Information</source> <translation>信息</translation> </message> <message> <source>Invalid -onion address or hostname: '%s'</source> <translation>无效的 -onion 地址: '%s'</translation> </message> <message> <source>Invalid -proxy address or hostname: '%s'</source> <translation>无效的 -proxy 地址: '%s'</translation> </message> <message> <source>Invalid amount for -paytxfee=&lt;amount&gt;: '%s' (must be at least %s)</source> <translation>无效的金额 -paytxfee=&lt;amount&gt;: '%s' (必须至少为 %s)</translation> </message> <message> <source>Invalid netmask specified in -whitelist: '%s'</source> <translation>-whitelist: '%s' 指定的网络掩码无效</translation> </message> <message> <source>Need to specify a port with -whitebind: '%s'</source> <translation>-whitebind: '%s' 需要指定一个端口</translation> </message> <message> <source>Reducing -maxconnections from %d to %d, because of system limitations.</source> <translation>因为系统的限制,将 -maxconnections 参数从 %d 降到了 %d</translation> </message> <message> <source>Signing transaction failed</source> <translation>签署交易失败</translation> </message> <message> <source>Specified -walletdir "%s" does not exist</source> <translation>以-walletdir指定的路径“%s”不存在</translation> </message> <message> <source>Specified -walletdir "%s" is a relative path</source> <translation>以-walletdir指定的路径“%s”是相对路径</translation> </message> <message> <source>Specified -walletdir "%s" is not a directory</source> <translation>以-walletdir指定的路径“%s”不是个目录</translation> </message> <message> <source>The transaction amount is too small to pay the fee</source> <translation>交易金额太小,不足以支付交易费</translation> </message> <message> <source>This is experimental software.</source> <translation>这是实验性的软件。</translation> </message> <message> <source>Transaction amount too small</source> <translation>交易量太小</translation> </message> <message> <source>Transaction too large for fee policy</source> <translation>费用策略的交易太大</translation> </message> <message> <source>Transaction too large</source> <translation>交易太大</translation> </message> <message> <source>Unable to bind to %s on this computer (bind returned error %s)</source> <translation>无法在此计算机上绑定 %s (绑定返回错误 %s)</translation> </message> <message> <source>Unable to generate initial keys</source> <translation>无法产生初始的密钥</translation> </message> <message> <source>Verifying wallet(s)...</source> <translation>正在检测钱包的完整性...</translation> </message> <message> <source>Wallet %s resides outside wallet directory %s</source> <translation>钱包文件%s没有在钱包目录%s里面</translation> </message> <message> <source>Warning</source> <translation>警告</translation> </message> <message> <source>Warning: unknown new rules activated (versionbit %i)</source> <translation>警告: 不明的交易规则被启用了(versionbit %i)</translation> </message> <message> <source>Zapping all transactions from wallet...</source> <translation>正在消除錢包中的所有交易...</translation> </message> <message> <source>-maxtxfee is set very high! 
Fees this large could be paid on a single transaction.</source> <translation>参数 -maxtxfee 设定了很高的金额!这是你一次交易就有可能付出的最高手续费。</translation> </message> <message> <source>Error loading %s: You can't enable HD on an already existing non-HD wallet</source> <translation>加载%s发生错误:不能对已存在的非HD钱包启用HD功能</translation> </message> <message> <source>This is the transaction fee you may pay when fee estimates are not available.</source> <translation>这是在费用估计不可用时你可能会支付的交易费。</translation> </message> <message> <source>This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit %s and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard.</source> <translation>此产品包含了由OpenSSL Project所开发的OpenSSL Toolkit软件%s,由Eric Young撰写的加解密软件,以及由Thomas Bernard所撰写的UPnP软件。</translation> </message> <message> <source>Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments.</source> <translation>网络版本字符串的总长度 (%i) 超过最大长度 (%i) 了。请减少 uacomment 参数的数目或长度。</translation> </message> <message> <source>Unsupported argument -socks found. Setting SOCKS version isn't possible anymore, only SOCKS5 proxies are supported.</source> <translation>找到不再支持的 -socks 参数。现在只支持 SOCKS5 协议的代理服务器,因此不可以指定 SOCKS 协议版本。</translation> </message> <message> <source>Unsupported argument -whitelistalwaysrelay ignored, use -whitelistrelay and/or -whitelistforcerelay.</source> <translation>一个不被支持的参数 -whitelistalwaysrelay 被忽略了。请使用 -whitelistrelay 或者 -whitelistforcerelay.</translation> </message> <message> <source>Warning: Unknown block versions being mined! It's possible unknown rules are in effect</source> <translation>警告: 未知的区块版本被挖掘!未知规则可能已生效</translation> </message> <message> <source>Warning: Wallet file corrupt, data salvaged! Original %s saved as %s in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>警告:钱包文件损坏,但数据被救回!原始的钱包文件%s已经重命名为%s并存储到%s目录下 。如果您的账户余额或者交易记录不正确,请使用您的钱包备份文件恢复。</translation> </message> <message> <source>%s is set very high!</source> <translation>%s非常高!</translation> </message> <message> <source>Error loading wallet %s. 
Duplicate -wallet filename specified.</source> <translation>加载钱包 %s 出错。 重复的 -wallet 文件名。</translation> </message> <message> <source>Keypool ran out, please call keypoolrefill first</source> <translation>密钥池已经耗尽,请先执行keypoolrefill</translation> </message> <message> <source>Starting network threads...</source> <translation>正在启动网络线程...</translation> </message> <message> <source>The wallet will avoid paying less than the minimum relay fee.</source> <translation>钱包避免低于最小交易费的支付</translation> </message> <message> <source>This is the minimum transaction fee you pay on every transaction.</source> <translation>这是你每次交易付款时最少要付的手续费。</translation> </message> <message> <source>This is the transaction fee you will pay if you send a transaction.</source> <translation>如果发送交易,这将是你要支付的交易费。</translation> </message> <message> <source>Transaction amounts must not be negative</source> <translation>交易金额不不可为负数</translation> </message> <message> <source>Transaction has too long of a mempool chain</source> <translation>交易造成内存池中的交易链太长</translation> </message> <message> <source>Transaction must have at least one recipient</source> <translation>交易必须包含至少一个接收人</translation> </message> <message> <source>Unknown network specified in -onlynet: '%s'</source> <translation>-onlynet 指定的是未知网络:%s</translation> </message> <message> <source>Insufficient funds</source> <translation>金额不足</translation> </message> <message> <source>Loading block index...</source> <translation>正在加载区块索引...</translation> </message> <message> <source>Loading wallet...</source> <translation>正在加载钱包...</translation> </message> <message> <source>Cannot downgrade wallet</source> <translation>无法降级钱包</translation> </message> <message> <source>Rescanning...</source> <translation>正在重新扫描...</translation> </message> <message> <source>Done loading</source> <translation>加载完成</translation> </message> <message> <source>Error</source> <translation>错误</translation> </message> </context> </TS>
<message> <source>Edit sending address</source>
ipGroup.ts
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** // *** Do not edit by hand unless you're certain you know what you are doing! *** import * as pulumi from "@pulumi/pulumi"; import * as inputs from "../types/input"; import * as outputs from "../types/output"; import * as utilities from "../utilities"; /** * Provides an IP access control group in AWS WorkSpaces Service * * ## Example Usage * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as aws from "@pulumi/aws"; * * const contractors = new aws.workspaces.IpGroup("contractors", { * description: "Contractors IP access control group", * }); * ``` */ export class IpGroup extends pulumi.CustomResource { /** * Get an existing IpGroup resource's state with the given name, ID, and optional extra * properties used to qualify the lookup. * * @param name The _unique_ name of the resulting resource. * @param id The _unique_ provider ID of the resource to lookup. * @param state Any extra arguments used during the lookup. * @param opts Optional settings to control the behavior of the CustomResource. */ public static get(name: string, id: pulumi.Input<pulumi.ID>, state?: IpGroupState, opts?: pulumi.CustomResourceOptions): IpGroup { return new IpGroup(name, <any>state, { ...opts, id: id }); } /** @internal */ public static readonly __pulumiType = 'aws:workspaces/ipGroup:IpGroup'; /** * Returns true if the given object is an instance of IpGroup. This is designed to work even * when multiple copies of the Pulumi SDK have been loaded into the same process. */ public static isInstance(obj: any): obj is IpGroup { if (obj === undefined || obj === null) { return false; } return obj['__pulumiType'] === IpGroup.__pulumiType; } /** * The description. */ public readonly description!: pulumi.Output<string | undefined>; /** * The name of the IP group. */ public readonly name!: pulumi.Output<string>; /** * One or more pairs specifying the IP group rule (in CIDR format) from which web requests originate. */ public readonly rules!: pulumi.Output<outputs.workspaces.IpGroupRule[] | undefined>; public readonly tags!: pulumi.Output<{[key: string]: string} | undefined>; /** * Create an IpGroup resource with the given unique name, arguments, and options. * * @param name The _unique_ name of the resource. * @param args The arguments to use to populate this resource's properties. * @param opts A bag of options that control this resource's behavior. */ constructor(name: string, args?: IpGroupArgs, opts?: pulumi.CustomResourceOptions) constructor(name: string, argsOrState?: IpGroupArgs | IpGroupState, opts?: pulumi.CustomResourceOptions) { let inputs: pulumi.Inputs = {}; if (opts && opts.id) { const state = argsOrState as IpGroupState | undefined; inputs["description"] = state ? state.description : undefined; inputs["name"] = state ? state.name : undefined; inputs["rules"] = state ? state.rules : undefined; inputs["tags"] = state ? state.tags : undefined; } else { const args = argsOrState as IpGroupArgs | undefined; inputs["description"] = args ? args.description : undefined; inputs["name"] = args ? args.name : undefined; inputs["rules"] = args ? args.rules : undefined; inputs["tags"] = args ? args.tags : undefined; } if (!opts) { opts = {} } if (!opts.version) { opts.version = utilities.getVersion(); } super(IpGroup.__pulumiType, name, inputs, opts); } } /** * Input properties used for looking up and filtering IpGroup resources. */ export interface IpGroupState { /** * The description. 
*/ readonly description?: pulumi.Input<string>; /** * The name of the IP group. */
readonly rules?: pulumi.Input<pulumi.Input<inputs.workspaces.IpGroupRule>[]>; readonly tags?: pulumi.Input<{[key: string]: pulumi.Input<string>}>; } /** * The set of arguments for constructing an IpGroup resource. */ export interface IpGroupArgs { /** * The description. */ readonly description?: pulumi.Input<string>; /** * The name of the IP group. */ readonly name?: pulumi.Input<string>; /** * One or more pairs specifying the IP group rule (in CIDR format) from which web requests originate. */ readonly rules?: pulumi.Input<pulumi.Input<inputs.workspaces.IpGroupRule>[]>; readonly tags?: pulumi.Input<{[key: string]: pulumi.Input<string>}>; }
readonly name?: pulumi.Input<string>; /** * One or more pairs specifying the IP group rule (in CIDR format) from which web requests originate. */
mock_match_test.go
package match import ( "testing" "github.com/kolesa-team/http-api-mock/definition" ) func
(t *testing.T) { hreq := &definition.Request{} hreq.Method = "GET" mreq := &definition.Request{} mreq.Method = "GET" m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } mreq.Method = "POST" if m, _ := m.Match(hreq, mreq); m { t.Error("Not expected match") } } func TestMatchPath(t *testing.T) { hreq := &definition.Request{} hreq.Path = "/a/b/c" mreq := &definition.Request{} mreq.Path = "/a/b/c" m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } mreq.Path = "/a/b/d" if m, _ := m.Match(hreq, mreq); m { t.Error("Not expected match") } } func TestGlobPath(t *testing.T) { hreq := &definition.Request{} hreq.Path = "/a/b/c" mreq := &definition.Request{} mreq.Path = "/a/b/*" m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } } func TestRouteParametersPath(t *testing.T) { hreq := &definition.Request{} hreq.Path = "/a/b/c" mreq := &definition.Request{} mreq.Path = "/a/:b/:c" m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } } func TestMatchQueryString(t *testing.T) { hreq := &definition.Request{} hval := make(definition.Values) hval["test"] = []string{"test"} hreq.QueryStringParameters = hval mreq := &definition.Request{} mval := make(definition.Values) mval["test"] = []string{"test"} mreq.QueryStringParameters = mval m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } mval["test2"] = []string{"test2"} if m, _ := m.Match(hreq, mreq); m { t.Error("Not expected match") } } func TestMatchCookies(t *testing.T) { hreq := &definition.Request{} hval := make(definition.Cookies) hval["cookie"] = "val" hreq.Cookies = hval mreq := &definition.Request{} mval := make(definition.Cookies) mval["cookie"] = "val" mreq.Cookies = mval m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } mval["cookie2"] = "val2" if m, _ := m.Match(hreq, mreq); m { t.Error("Not expected match") } } func TestMatchHeaders(t *testing.T) { hreq := &definition.Request{} hval := make(definition.Values) hval["Test"] = []string{"test"} hreq.Headers = hval mreq := &definition.Request{} mval := make(definition.Values) mval["test"] = []string{"test"} mreq.Headers = mval m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } mval["test"] = []string{"test2"} if m, _ := m.Match(hreq, mreq); m { t.Error("Not expected match") } } func TestMatchBody(t *testing.T) { hreq := &definition.Request{} hreq.Body = "HelloWorld" mreq := &definition.Request{} mreq.Body = "HelloWorld" m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } mreq.Body = "ByeWorld" if m, _ := m.Match(hreq, mreq); m { t.Error("Not expected match") } } func TestGlobBody(t *testing.T) { hreq := &definition.Request{} hreq.Body = "Hello World From Test" mreq := &definition.Request{} mreq.Body = "*World*" m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } } func TestMatchIgnoreMissingBodyDefinition(t *testing.T) { hreq := &definition.Request{} hreq.Body = "HelloWorld" mreq := &definition.Request{} m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } } func TestMatchIgnoreUnexpectedHeadersAndQuery(t *testing.T) { hreq := &definition.Request{} hreq.Method = "GET" hreq.Path = "/a/b/c" hval := make(definition.Values) hval["test"] = []string{"test"} hval["test2"] = []string{"test"} hval["test3"] = []string{"test"} hreq.QueryStringParameters = hval hreq.Headers = hval mreq := &definition.Request{} mreq.Method = "GET" mreq.Path = "/a/b/c" mval := make(definition.Values) mval["test"] = []string{"test"} 
mreq.QueryStringParameters = mval mreq.Headers = mval m := MockMatch{} if m, err := m.Match(hreq, mreq); !m { t.Error(err) } }
TestMatchMethod
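The record above checks one matcher field per test function. As a compact illustration of the same API, here is a minimal table-driven sketch of the path cases (exact, glob, route parameters, mismatch). It assumes only what the record shows — package `match`, `MockMatch.Match(host, mock) (bool, error)`, and `definition.Request` with a `Path` field — so the test name and structure are illustrative, not part of the original suite.

```go
package match

import (
	"testing"

	"github.com/kolesa-team/http-api-mock/definition"
)

// TestMatchPathTable is a hypothetical, table-driven restatement of the
// path-matching cases exercised in the record above.
func TestMatchPathTable(t *testing.T) {
	cases := []struct {
		name string
		host string // incoming request path
		mock string // mock definition path
		want bool
	}{
		{"exact", "/a/b/c", "/a/b/c", true},
		{"glob", "/a/b/c", "/a/b/*", true},
		{"route params", "/a/b/c", "/a/:b/:c", true},
		{"mismatch", "/a/b/c", "/a/b/d", false},
	}

	m := MockMatch{}
	for _, tc := range cases {
		hreq := &definition.Request{Path: tc.host}
		mreq := &definition.Request{Path: tc.mock}
		got, err := m.Match(hreq, mreq)
		if got != tc.want {
			t.Errorf("%s: Match() = %v (err=%v), want %v", tc.name, got, err, tc.want)
		}
	}
}
```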
ls.go
package externalservice import ( "github.com/rancher/rio/cli/pkg/clicontext" "github.com/rancher/rio/cli/pkg/table" "github.com/rancher/rio/cli/pkg/tables" clitypes "github.com/rancher/rio/cli/pkg/types" "github.com/urfave/cli" ) type Ls struct { }
cmd.Flags = append(cmd.Flags, table.WriterFlags()...) } func (l *Ls) Run(ctx *clicontext.CLIContext) error { externalServices, err := ctx.List(clitypes.ExternalServiceType) if err != nil { return err } writer := tables.NewExternalService(ctx) return writer.Write(externalServices) }
func (l *Ls) Customize(cmd *cli.Command) {
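The `Ls` command above follows a two-method pattern: `Customize` attaches the shared table-writer flags to the command, and `Run` lists one resource type and hands the result to a matching table writer. Below is a sketch of the same pattern applied to a different resource type; `clitypes.ServiceType` and `tables.NewService` are assumptions for illustration and are not confirmed by the record.

```go
package externalservice

import (
	"github.com/rancher/rio/cli/pkg/clicontext"
	"github.com/rancher/rio/cli/pkg/table"
	"github.com/rancher/rio/cli/pkg/tables"
	clitypes "github.com/rancher/rio/cli/pkg/types"
	"github.com/urfave/cli"
)

// LsServices mirrors Ls for a hypothetical service listing; the
// clitypes.ServiceType constant and tables.NewService writer are assumed.
type LsServices struct{}

func (l *LsServices) Customize(cmd *cli.Command) {
	// Reuse the shared table-writer flags, exactly as Ls does.
	cmd.Flags = append(cmd.Flags, table.WriterFlags()...)
}

func (l *LsServices) Run(ctx *clicontext.CLIContext) error {
	services, err := ctx.List(clitypes.ServiceType)
	if err != nil {
		return err
	}
	return tables.NewService(ctx).Write(services)
}
```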
language.py
from spacy.language import Language from spacy.tokens import Doc, Span, Token from spacy.util import get_lang_class from spacy.gold import GoldParse from .util import is_special_token from . import about class PyTT_Language(Language): """A subclass of spacy.Language that holds a PyTorch-Transformer (PyTT) pipeline. PyTT pipelines work only slightly differently from spaCy's default pipelines. Specifically, we introduce a new pipeline component at the start of the pipeline, PyTT_TokenVectorEncoder. We then modify the nlp.update() function to run the PyTT_TokenVectorEncoder before the other pipeline components, and backprop it after the other components are done. """ lang_factory_name = "pytt" @staticmethod def install_extensions(): tok2vec_attrs = [ "pytt_last_hidden_state", "pytt_pooler_output", "pytt_all_hidden_states", "pytt_all_attentions", "pytt_d_last_hidden_state", "pytt_d_pooler_output", "pytt_d_all_hidden_states", "pytt_d_all_attentions", ] for attr in tok2vec_attrs: Doc.set_extension(attr, default=None) Span.set_extension(attr, getter=get_span_tok2vec_getter(attr)) Token.set_extension(attr, getter=get_token_tok2vec_getter(attr)) wp_attrs = ["pytt_alignment", "pytt_word_pieces", "pytt_word_pieces_"] for attr in wp_attrs: Doc.set_extension(attr, default=None) Span.set_extension(attr, getter=get_span_wp_getter(attr)) Token.set_extension(attr, getter=get_token_wp_getter(attr)) for cls in [Token, Span, Doc]: cls.set_extension("pytt_start", getter=get_wp_start) cls.set_extension("pytt_end", getter=get_wp_end) def __init__( self, vocab=True, make_doc=True, max_length=10 ** 6, meta={}, **kwargs ): """Initialize the language class. Expects either a pytt_name setting in the meta or as a keyword argument, specifying the pre-trained model name. This is used to set up the model-specific tokenizer. """ meta = dict(meta) meta["lang_factory"] = self.lang_factory_name # Add this package to requirements to it will be included in the # install_requires of any model using this language class package = f"{about.__title__}>={about.__version__}" meta.setdefault("requirements", []).append(package) self.lang = meta.get("lang", "xx") self.Defaults = get_defaults(self.lang) super().__init__(vocab, make_doc, max_length, meta=meta, **kwargs) def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg={}): component_cfg = dict(component_cfg) sentencizer = self.get_pipe("sentencizer") wp = self.get_pipe("pytt_wordpiecer") tok2vec = self.get_pipe("pytt_tok2vec") new_docs = [] new_golds = [] for doc, gold in zip(docs, golds): if isinstance(doc, str): doc = self.make_doc(doc) doc = sentencizer(doc) doc = wp(doc) if not isinstance(gold, GoldParse): gold = GoldParse(doc, **gold) new_docs.append(doc) new_golds.append(gold) docs = new_docs golds = new_golds pytt_outputs, backprop_tok2vec = tok2vec.begin_update( docs, drop=drop, **component_cfg.get("pytt_tok2vec", {}) ) assert len(docs) == len(pytt_outputs) tok2vec.set_annotations(docs, pytt_outputs) for doc in docs: assert doc._.pytt_last_hidden_state is not None with self.disable_pipes("pytt_tok2vec"): super().update( docs, golds, drop=0.0, sgd=sgd, losses=losses, component_cfg=component_cfg, ) backprop_tok2vec(docs, sgd=sgd) def resume_training(self, sgd=None, component_cfg=None, **kwargs): """Continue training a pre-trained model. Before running the normal Language.resume_training method, we do the following: * Look for a tok2vec pipeline component. By default we look for the name 'pytt_tok2vec'. 
This can be changed with the tok2vec_name keyword argument. If no component is found, a ValueError is raised. * If any other components have `component.model == True` and a `.begin_training()` method, we call the `.begin_training()` method. Configuration can be passed in using the component_cfg keyword argument. If unset, we also pass in a value for token_vector_width, which we read from the tok2vec component. """ if component_cfg is None: component_cfg = {} tok2vec_name = kwargs.get("tok2vec_name", "pytt_tok2vec") tok2vec = self.get_pipe(tok2vec_name) token_vector_width = tok2vec.token_vector_width for name, component in self.pipeline: if name == tok2vec_name: continue elif getattr(component, "model", None) is not True: continue elif not hasattr(component, "begin_training"): continue cfg = component_cfg.get(name, {})
cfg["tok2vec_name"] = tok2vec_name if "token_vector_width" not in component_cfg: cfg["token_vector_width"] = token_vector_width component.cfg.update(cfg) component.begin_training(pipeline=self.pipeline, sgd=False, **cfg) assert component.model is not True return super().resume_training(sgd=sgd, **kwargs) def get_defaults(lang): """Get the language-specific defaults, if available in spaCy.""" try: lang_cls = get_lang_class(lang) return lang_cls.Defaults except ImportError: return Language.Defaults def get_wp_start(span): for token in span: if token._.pytt_alignment: wp_start = token._.pytt_alignment[0] break else: return None wordpieces = span.doc._.pytt_word_pieces_ if wp_start >= 1 and is_special_token(wordpieces[wp_start - 1]): return wp_start - 1 else: return wp_start def get_wp_end(span): for token in reversed(span): if token._.pytt_alignment: wp_end = token._.pytt_alignment[-1] break else: return None wordpieces = span.doc._.pytt_word_pieces_ next_token = wp_end + 1 if next_token < len(wordpieces) and is_special_token(wordpieces[next_token]): return next_token else: return wp_end def get_span_wp_getter(attr): def span_getter(span): return [token._.get(attr) for token in span] return span_getter def get_token_wp_getter(attr): def token_alignment_getter(token): doc_values = token.doc._.get(attr) return doc_values[token.i] if doc_values is not None else None def token_wordpiece_getter(token): doc_values = token.doc._.get(attr) indices = token._.pytt_alignment return [doc_values[i] for i in indices] if attr == "pytt_alignment": return token_alignment_getter else: return token_wordpiece_getter def get_span_tok2vec_getter(attr): def span_getter(span): doc_activations = span.doc._.get(attr) if doc_activations is None: return None wp_start = span[0]._.pytt_alignment[0] wp_end = span[-1]._.pytt_alignment[-1] if wp_start is not None and wp_end is not None: return doc_activations[wp_start:wp_end] else: # Return empty slice. return doc_activations[0:0] return span_getter def get_token_tok2vec_getter(attr): def token_getter(token): # Delegate through span, so get a span with just the token. span = token.doc[token.i : token.i + 1] return span._.get(attr) return token_getter
if "tok2vec_name" not in component_cfg:
level_db.go
package store import ( "encoding/json" "fmt" "path/filepath" dbm "github.com/tendermint/tm-db" ) const ( keyDBName = "keys" infoSuffix = "info" ) var ( _ KeyDAO = LevelDBDAO{} ) type LevelDBDAO struct { db dbm.DB Crypto } // NewLevelDB initialize a keybase based on the configuration. // Use leveldb as storage func NewLevelDB(rootDir string, crypto Crypto) (KeyDAO, error) { db, err := dbm.NewGoLevelDB(keyDBName, filepath.Join(rootDir, "keys")) if err != nil { return nil, err } if crypto == nil { crypto = AES{} } levelDB := LevelDBDAO{ db: db, Crypto: crypto, } return levelDB, nil } // Write add a key information to the local store func (k LevelDBDAO) Write(name, password string, info KeyInfo) error { if k.Has(name) { return fmt.Errorf("name %s has exist", name) } privStr, err := k.Encrypt(info.PrivKeyArmor, password) if err != nil { return err } info.PrivKeyArmor = privStr bz, err := json.Marshal(info) if err != nil { return err } return k.db.SetSync(infoKey(name), bz) } // Read read a key information from the local store func (k LevelDBDAO) Read(name, password string) (store KeyInfo, err error) { bz, err := k.db.Get(infoKey(name)) if bz == nil || err != nil { return store, err } if err := json.Unmarshal(bz, &store); err != nil { return store, err } if len(password) > 0 { privStr, err := k.Decrypt(store.PrivKeyArmor, password) if err != nil { return store, err } store.PrivKeyArmor = privStr } return } // ReadMetadata read a key information from the local store func (k LevelDBDAO) ReadMetadata(name string) (store KeyInfo, err error) { bz, err := k.db.Get(infoKey(name)) if bz == nil || err != nil { return store, err } if err := json.Unmarshal(bz, &store); err != nil { return store, err } return
// Delete deletes a key from the local store func (k LevelDBDAO) Delete(name, password string) error { _, err := k.Read(name, password) if err != nil { return err } return k.db.DeleteSync(infoKey(name)) } // Has reports whether a key with the given name exists in the local store func (k LevelDBDAO) Has(name string) bool { existed, err := k.db.Has(infoKey(name)) if err != nil { return false } return existed } func infoKey(name string) []byte { return []byte(fmt.Sprintf("%s.%s", name, infoSuffix)) }
}
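// --- Editor's illustrative sketch (not part of the file above): a write/read
// round trip through LevelDBDAO, assuming it lives in this same package. The
// home directory, key name, password, and armor payload are made up; only the
// PrivKeyArmor field of KeyInfo is known from this file, so other fields are
// left at their zero values.
func exampleRoundTrip() error {
	dao, err := NewLevelDB("/tmp/keys-home", nil) // nil falls back to the default AES crypto
	if err != nil {
		return err
	}
	info := KeyInfo{PrivKeyArmor: "-----BEGIN TENDERMINT PRIVATE KEY-----..."}
	if err := dao.Write("alice", "s3cret", info); err != nil {
		return err
	}
	// Read decrypts PrivKeyArmor because a non-empty password is supplied.
	if _, err := dao.Read("alice", "s3cret"); err != nil {
		return err
	}
	return nil
}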
SimpleEventPlugin.js
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. * * @flow */ import type {EventPriority} from 'shared/ReactTypes'; import type { TopLevelType, DOMTopLevelEventType, } from 'legacy-events/TopLevelEventTypes'; import type { DispatchConfig, ReactSyntheticEvent, } from 'legacy-events/ReactSyntheticEventType'; import type {Fiber} from 'react-reconciler/src/ReactFiber'; import type {EventTypes, PluginModule} from 'legacy-events/PluginModuleType'; import type {EventSystemFlags} from 'legacy-events/EventSystemFlags'; import { DiscreteEvent, UserBlockingEvent, ContinuousEvent, } from 'shared/ReactTypes'; import {accumulateTwoPhaseDispatches} from 'legacy-events/EventPropagators'; import SyntheticEvent from 'legacy-events/SyntheticEvent'; import * as DOMTopLevelEventTypes from './DOMTopLevelEventTypes'; import warningWithoutStack from 'shared/warningWithoutStack'; import SyntheticAnimationEvent from './SyntheticAnimationEvent'; import SyntheticClipboardEvent from './SyntheticClipboardEvent'; import SyntheticFocusEvent from './SyntheticFocusEvent'; import SyntheticKeyboardEvent from './SyntheticKeyboardEvent'; import SyntheticMouseEvent from './SyntheticMouseEvent'; import SyntheticPointerEvent from './SyntheticPointerEvent'; import SyntheticDragEvent from './SyntheticDragEvent'; import SyntheticTouchEvent from './SyntheticTouchEvent'; import SyntheticTransitionEvent from './SyntheticTransitionEvent'; import SyntheticUIEvent from './SyntheticUIEvent'; import SyntheticWheelEvent from './SyntheticWheelEvent'; import getEventCharCode from './getEventCharCode'; /** * Turns * ['abort', ...] * into * eventTypes = { * 'abort': { * phasedRegistrationNames: { * bubbled: 'onAbort', * captured: 'onAbortCapture', * }, * dependencies: [TOP_ABORT], * }, * ... 
* }; * topLevelEventsToDispatchConfig = new Map([ * [TOP_ABORT, { sameConfig }], * ]); */ type EventTuple = [DOMTopLevelEventType, string, EventPriority]; const eventTuples: Array<EventTuple> = [ // Discrete events [DOMTopLevelEventTypes.TOP_BLUR, 'blur', DiscreteEvent], [DOMTopLevelEventTypes.TOP_CANCEL, 'cancel', DiscreteEvent], [DOMTopLevelEventTypes.TOP_CLICK, 'click', DiscreteEvent], [DOMTopLevelEventTypes.TOP_CLOSE, 'close', DiscreteEvent], [DOMTopLevelEventTypes.TOP_CONTEXT_MENU, 'contextMenu', DiscreteEvent], [DOMTopLevelEventTypes.TOP_COPY, 'copy', DiscreteEvent], [DOMTopLevelEventTypes.TOP_CUT, 'cut', DiscreteEvent], [DOMTopLevelEventTypes.TOP_AUX_CLICK, 'auxClick', DiscreteEvent], [DOMTopLevelEventTypes.TOP_DOUBLE_CLICK, 'doubleClick', DiscreteEvent], [DOMTopLevelEventTypes.TOP_DRAG_END, 'dragEnd', DiscreteEvent], [DOMTopLevelEventTypes.TOP_DRAG_START, 'dragStart', DiscreteEvent], [DOMTopLevelEventTypes.TOP_DROP, 'drop', DiscreteEvent], [DOMTopLevelEventTypes.TOP_FOCUS, 'focus', DiscreteEvent], [DOMTopLevelEventTypes.TOP_INPUT, 'input', DiscreteEvent], [DOMTopLevelEventTypes.TOP_INVALID, 'invalid', DiscreteEvent], [DOMTopLevelEventTypes.TOP_KEY_DOWN, 'keyDown', DiscreteEvent], [DOMTopLevelEventTypes.TOP_KEY_PRESS, 'keyPress', DiscreteEvent], [DOMTopLevelEventTypes.TOP_KEY_UP, 'keyUp', DiscreteEvent], [DOMTopLevelEventTypes.TOP_MOUSE_DOWN, 'mouseDown', DiscreteEvent], [DOMTopLevelEventTypes.TOP_MOUSE_UP, 'mouseUp', DiscreteEvent], [DOMTopLevelEventTypes.TOP_PASTE, 'paste', DiscreteEvent], [DOMTopLevelEventTypes.TOP_PAUSE, 'pause', DiscreteEvent], [DOMTopLevelEventTypes.TOP_PLAY, 'play', DiscreteEvent], [DOMTopLevelEventTypes.TOP_POINTER_CANCEL, 'pointerCancel', DiscreteEvent], [DOMTopLevelEventTypes.TOP_POINTER_DOWN, 'pointerDown', DiscreteEvent], [DOMTopLevelEventTypes.TOP_POINTER_UP, 'pointerUp', DiscreteEvent], [DOMTopLevelEventTypes.TOP_RATE_CHANGE, 'rateChange', DiscreteEvent], [DOMTopLevelEventTypes.TOP_RESET, 'reset', DiscreteEvent], [DOMTopLevelEventTypes.TOP_SEEKED, 'seeked', DiscreteEvent], [DOMTopLevelEventTypes.TOP_SUBMIT, 'submit', DiscreteEvent], [DOMTopLevelEventTypes.TOP_TOUCH_CANCEL, 'touchCancel', DiscreteEvent], [DOMTopLevelEventTypes.TOP_TOUCH_END, 'touchEnd', DiscreteEvent], [DOMTopLevelEventTypes.TOP_TOUCH_START, 'touchStart', DiscreteEvent], [DOMTopLevelEventTypes.TOP_VOLUME_CHANGE, 'volumeChange', DiscreteEvent], // User-blocking events [DOMTopLevelEventTypes.TOP_DRAG, 'drag', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_DRAG_ENTER, 'dragEnter', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_DRAG_EXIT, 'dragExit', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_DRAG_LEAVE, 'dragLeave', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_DRAG_OVER, 'dragOver', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_MOUSE_MOVE, 'mouseMove', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_MOUSE_OUT, 'mouseOut', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_MOUSE_OVER, 'mouseOver', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_POINTER_MOVE, 'pointerMove', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_POINTER_OUT, 'pointerOut', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_POINTER_OVER, 'pointerOver', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_SCROLL, 'scroll', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_TOGGLE, 'toggle', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_TOUCH_MOVE, 'touchMove', UserBlockingEvent], [DOMTopLevelEventTypes.TOP_WHEEL, 'wheel', UserBlockingEvent], // Continuous events [DOMTopLevelEventTypes.TOP_ABORT, 'abort', ContinuousEvent], 
[DOMTopLevelEventTypes.TOP_ANIMATION_END, 'animationEnd', ContinuousEvent], [ DOMTopLevelEventTypes.TOP_ANIMATION_ITERATION, 'animationIteration', ContinuousEvent, ], [ DOMTopLevelEventTypes.TOP_ANIMATION_START, 'animationStart', ContinuousEvent, ], [DOMTopLevelEventTypes.TOP_CAN_PLAY, 'canPlay', ContinuousEvent], [ DOMTopLevelEventTypes.TOP_CAN_PLAY_THROUGH, 'canPlayThrough', ContinuousEvent, ], [ DOMTopLevelEventTypes.TOP_DURATION_CHANGE, 'durationChange', ContinuousEvent, ], [DOMTopLevelEventTypes.TOP_EMPTIED, 'emptied', ContinuousEvent], [DOMTopLevelEventTypes.TOP_ENCRYPTED, 'encrypted', ContinuousEvent], [DOMTopLevelEventTypes.TOP_ENDED, 'ended', ContinuousEvent], [DOMTopLevelEventTypes.TOP_ERROR, 'error', ContinuousEvent], [ DOMTopLevelEventTypes.TOP_GOT_POINTER_CAPTURE, 'gotPointerCapture', ContinuousEvent, ], [DOMTopLevelEventTypes.TOP_LOAD, 'load', ContinuousEvent], [DOMTopLevelEventTypes.TOP_LOADED_DATA, 'loadedData', ContinuousEvent], [ DOMTopLevelEventTypes.TOP_LOADED_METADATA, 'loadedMetadata', ContinuousEvent, ], [DOMTopLevelEventTypes.TOP_LOAD_START, 'loadStart', ContinuousEvent], [ DOMTopLevelEventTypes.TOP_LOST_POINTER_CAPTURE, 'lostPointerCapture', ContinuousEvent, ], [DOMTopLevelEventTypes.TOP_PLAYING, 'playing', ContinuousEvent], [DOMTopLevelEventTypes.TOP_PROGRESS, 'progress', ContinuousEvent], [DOMTopLevelEventTypes.TOP_SEEKING, 'seeking', ContinuousEvent], [DOMTopLevelEventTypes.TOP_STALLED, 'stalled', ContinuousEvent], [DOMTopLevelEventTypes.TOP_SUSPEND, 'suspend', ContinuousEvent], [DOMTopLevelEventTypes.TOP_TIME_UPDATE, 'timeUpdate', ContinuousEvent], [DOMTopLevelEventTypes.TOP_TRANSITION_END, 'transitionEnd', ContinuousEvent], [DOMTopLevelEventTypes.TOP_WAITING, 'waiting', ContinuousEvent], ]; const eventTypes: EventTypes = {}; const topLevelEventsToDispatchConfig: { [key: TopLevelType]: DispatchConfig, } = {}; for (let i = 0; i < eventTuples.length; i++) { const eventTuple = eventTuples[i]; const topEvent = eventTuple[0]; const event = eventTuple[1]; const eventPriority = eventTuple[2]; const capitalizedEvent = event[0].toUpperCase() + event.slice(1); const onEvent = 'on' + capitalizedEvent; const config = { phasedRegistrationNames: { bubbled: onEvent, captured: onEvent + 'Capture', }, dependencies: [topEvent], eventPriority, }; eventTypes[event] = config; topLevelEventsToDispatchConfig[topEvent] = config; } // Only used in DEV for exhaustiveness validation. 
const knownHTMLTopLevelTypes: Array<DOMTopLevelEventType> = [ DOMTopLevelEventTypes.TOP_ABORT, DOMTopLevelEventTypes.TOP_CANCEL, DOMTopLevelEventTypes.TOP_CAN_PLAY, DOMTopLevelEventTypes.TOP_CAN_PLAY_THROUGH, DOMTopLevelEventTypes.TOP_CLOSE, DOMTopLevelEventTypes.TOP_DURATION_CHANGE, DOMTopLevelEventTypes.TOP_EMPTIED, DOMTopLevelEventTypes.TOP_ENCRYPTED, DOMTopLevelEventTypes.TOP_ENDED, DOMTopLevelEventTypes.TOP_ERROR, DOMTopLevelEventTypes.TOP_INPUT, DOMTopLevelEventTypes.TOP_INVALID, DOMTopLevelEventTypes.TOP_LOAD, DOMTopLevelEventTypes.TOP_LOADED_DATA, DOMTopLevelEventTypes.TOP_LOADED_METADATA, DOMTopLevelEventTypes.TOP_LOAD_START, DOMTopLevelEventTypes.TOP_PAUSE, DOMTopLevelEventTypes.TOP_PLAY, DOMTopLevelEventTypes.TOP_PLAYING, DOMTopLevelEventTypes.TOP_PROGRESS, DOMTopLevelEventTypes.TOP_RATE_CHANGE, DOMTopLevelEventTypes.TOP_RESET, DOMTopLevelEventTypes.TOP_SEEKED, DOMTopLevelEventTypes.TOP_SEEKING, DOMTopLevelEventTypes.TOP_STALLED, DOMTopLevelEventTypes.TOP_SUBMIT, DOMTopLevelEventTypes.TOP_SUSPEND, DOMTopLevelEventTypes.TOP_TIME_UPDATE, DOMTopLevelEventTypes.TOP_TOGGLE, DOMTopLevelEventTypes.TOP_VOLUME_CHANGE, DOMTopLevelEventTypes.TOP_WAITING, ]; const SimpleEventPlugin: PluginModule<MouseEvent> & { getEventPriority: (topLevelType: TopLevelType) => EventPriority, } = { eventTypes: eventTypes, getEventPriority(topLevelType: TopLevelType): EventPriority { const config = topLevelEventsToDispatchConfig[topLevelType]; return config !== undefined ? config.eventPriority : ContinuousEvent; }, extractEvents: function( topLevelType: TopLevelType, targetInst: null | Fiber, nativeEvent: MouseEvent, nativeEventTarget: EventTarget, eventSystemFlags: EventSystemFlags, ): null | ReactSyntheticEvent { const dispatchConfig = topLevelEventsToDispatchConfig[topLevelType]; if (!dispatchConfig) { return null; } let EventConstructor; switch (topLevelType) { case DOMTopLevelEventTypes.TOP_KEY_PRESS: // Firefox creates a keypress event for function keys too. This removes // the unwanted keypress events. Enter is however both printable and // non-printable. One would expect Tab to be as well (but it isn't). if (getEventCharCode(nativeEvent) === 0) { return null; } /* falls through */ case DOMTopLevelEventTypes.TOP_KEY_DOWN: case DOMTopLevelEventTypes.TOP_KEY_UP: EventConstructor = SyntheticKeyboardEvent; break; case DOMTopLevelEventTypes.TOP_BLUR: case DOMTopLevelEventTypes.TOP_FOCUS: EventConstructor = SyntheticFocusEvent; break; case DOMTopLevelEventTypes.TOP_CLICK: // Firefox creates a click event on right mouse clicks. This removes the // unwanted click events. if (nativeEvent.button === 2) { return null; } /* falls through */ case DOMTopLevelEventTypes.TOP_AUX_CLICK:
case DOMTopLevelEventTypes.TOP_MOUSE_UP: // TODO: Disabled elements should not respond to mouse events /* falls through */ case DOMTopLevelEventTypes.TOP_MOUSE_OUT: case DOMTopLevelEventTypes.TOP_MOUSE_OVER: case DOMTopLevelEventTypes.TOP_CONTEXT_MENU: EventConstructor = SyntheticMouseEvent; break; case DOMTopLevelEventTypes.TOP_DRAG: case DOMTopLevelEventTypes.TOP_DRAG_END: case DOMTopLevelEventTypes.TOP_DRAG_ENTER: case DOMTopLevelEventTypes.TOP_DRAG_EXIT: case DOMTopLevelEventTypes.TOP_DRAG_LEAVE: case DOMTopLevelEventTypes.TOP_DRAG_OVER: case DOMTopLevelEventTypes.TOP_DRAG_START: case DOMTopLevelEventTypes.TOP_DROP: EventConstructor = SyntheticDragEvent; break; case DOMTopLevelEventTypes.TOP_TOUCH_CANCEL: case DOMTopLevelEventTypes.TOP_TOUCH_END: case DOMTopLevelEventTypes.TOP_TOUCH_MOVE: case DOMTopLevelEventTypes.TOP_TOUCH_START: EventConstructor = SyntheticTouchEvent; break; case DOMTopLevelEventTypes.TOP_ANIMATION_END: case DOMTopLevelEventTypes.TOP_ANIMATION_ITERATION: case DOMTopLevelEventTypes.TOP_ANIMATION_START: EventConstructor = SyntheticAnimationEvent; break; case DOMTopLevelEventTypes.TOP_TRANSITION_END: EventConstructor = SyntheticTransitionEvent; break; case DOMTopLevelEventTypes.TOP_SCROLL: EventConstructor = SyntheticUIEvent; break; case DOMTopLevelEventTypes.TOP_WHEEL: EventConstructor = SyntheticWheelEvent; break; case DOMTopLevelEventTypes.TOP_COPY: case DOMTopLevelEventTypes.TOP_CUT: case DOMTopLevelEventTypes.TOP_PASTE: EventConstructor = SyntheticClipboardEvent; break; case DOMTopLevelEventTypes.TOP_GOT_POINTER_CAPTURE: case DOMTopLevelEventTypes.TOP_LOST_POINTER_CAPTURE: case DOMTopLevelEventTypes.TOP_POINTER_CANCEL: case DOMTopLevelEventTypes.TOP_POINTER_DOWN: case DOMTopLevelEventTypes.TOP_POINTER_MOVE: case DOMTopLevelEventTypes.TOP_POINTER_OUT: case DOMTopLevelEventTypes.TOP_POINTER_OVER: case DOMTopLevelEventTypes.TOP_POINTER_UP: EventConstructor = SyntheticPointerEvent; break; default: if (__DEV__) { if (knownHTMLTopLevelTypes.indexOf(topLevelType) === -1) { warningWithoutStack( false, 'SimpleEventPlugin: Unhandled event type, `%s`. This warning ' + 'is likely caused by a bug in React. Please file an issue.', topLevelType, ); } } // HTML Events // @see http://www.w3.org/TR/html5/index.html#events-0 EventConstructor = SyntheticEvent; break; } const event = EventConstructor.getPooled( dispatchConfig, targetInst, nativeEvent, nativeEventTarget, ); accumulateTwoPhaseDispatches(event); return event; }, }; export default SimpleEventPlugin;
case DOMTopLevelEventTypes.TOP_DOUBLE_CLICK: case DOMTopLevelEventTypes.TOP_MOUSE_DOWN: case DOMTopLevelEventTypes.TOP_MOUSE_MOVE:
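// --- Editor's illustrative sketch (not part of the file above): the
// registration-name derivation performed in the loop over eventTuples,
// shown standalone with an arbitrary event name.
const sampleEvent = 'pointerDown';
const sampleCapitalized = sampleEvent[0].toUpperCase() + sampleEvent.slice(1); // 'PointerDown'
const sampleOnEvent = 'on' + sampleCapitalized;
console.log(sampleOnEvent, sampleOnEvent + 'Capture'); // onPointerDown onPointerDownCapture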
main.go
package main import ( "context" "flag" "fmt" "os" "runtime" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/rest" "github.com/99cloud/operator-sdk-samples/memcached-operator/pkg/apis" "github.com/99cloud/operator-sdk-samples/memcached-operator/pkg/controller" "github.com/operator-framework/operator-sdk/pkg/k8sutil" kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics" "github.com/operator-framework/operator-sdk/pkg/leader" "github.com/operator-framework/operator-sdk/pkg/log/zap" "github.com/operator-framework/operator-sdk/pkg/metrics" "github.com/operator-framework/operator-sdk/pkg/restmapper" sdkVersion "github.com/operator-framework/operator-sdk/version" "github.com/spf13/pflag" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" ) // Change below variables to serve metrics on different host or port. var ( metricsHost = "0.0.0.0" metricsPort int32 = 8383 operatorMetricsPort int32 = 8686 ) var log = logf.Log.WithName("cmd") func printVersion() { log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version)) } func
() { // Add the zap logger flag set to the CLI. The flag set must // be added before calling pflag.Parse(). pflag.CommandLine.AddFlagSet(zap.FlagSet()) // Add flags registered by imported packages (e.g. glog and // controller-runtime) pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() // Use a zap logr.Logger implementation. If none of the zap // flags are configured (or if the zap flag set is not being // used), this defaults to a production zap logger. // // The logger instantiated here can be changed to any logger // implementing the logr.Logger interface. This logger will // be propagated through the whole operator, generating // uniform and structured logs. logf.SetLogger(zap.Logger()) printVersion() namespace, err := k8sutil.GetWatchNamespace() if err != nil { log.Error(err, "Failed to get watch namespace") os.Exit(1) } // Get a config to talk to the apiserver cfg, err := config.GetConfig() if err != nil { log.Error(err, "") os.Exit(1) } ctx := context.TODO() // Become the leader before proceeding err = leader.Become(ctx, "memcached-operator-lock") if err != nil { log.Error(err, "") os.Exit(1) } // Create a new Cmd to provide shared dependencies and start components mgr, err := manager.New(cfg, manager.Options{ Namespace: namespace, MapperProvider: restmapper.NewDynamicRESTMapper, MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), }) if err != nil { log.Error(err, "") os.Exit(1) } log.Info("Registering Components.") // Setup Scheme for all resources if err := apis.AddToScheme(mgr.GetScheme()); err != nil { log.Error(err, "") os.Exit(1) } // Setup all Controllers if err := controller.AddToManager(mgr); err != nil { log.Error(err, "") os.Exit(1) } if err = serveCRMetrics(cfg); err != nil { log.Info("Could not generate and serve custom resource metrics", "error", err.Error()) } // Add to the below struct any other metrics ports you want to expose. servicePorts := []v1.ServicePort{ {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}}, {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}}, } // Create Service object to expose the metrics port(s). service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts) if err != nil { log.Info("Could not create metrics Service", "error", err.Error()) } // CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources // necessary to configure Prometheus to scrape metrics from this operator. services := []*v1.Service{service} _, err = metrics.CreateServiceMonitors(cfg, namespace, services) if err != nil { log.Info("Could not create ServiceMonitor object", "error", err.Error()) // If this operator is deployed to a cluster without the prometheus-operator running, it will return // ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation. if err == metrics.ErrServiceMonitorNotPresent { log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error()) } } log.Info("Starting the Cmd.") // Start the Cmd if err := mgr.Start(signals.SetupSignalHandler()); err != nil { log.Error(err, "Manager exited non-zero") os.Exit(1) } } // serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types. // It serves those metrics on "http://metricsHost:operatorMetricsPort". 
func serveCRMetrics(cfg *rest.Config) error { // Below function returns filtered operator/CustomResource specific GVKs. // For more control override the below GVK list with your own custom logic. filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme) if err != nil { return err } // Get the namespace the operator is currently deployed in. operatorNs, err := k8sutil.GetOperatorNamespace() if err != nil { return err } // To generate metrics in other namespaces, add the values below. ns := []string{operatorNs} // Generate and serve custom resource specific metrics. err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort) if err != nil { return err } return nil }
main
main.py
from .routes.users import router as user_router from .application import app import sys
ROUTERS = (user_router,) for r in ROUTERS: app.include_router(r) if __name__ == "__main__": import uvicorn uvicorn.run(app, host="0.0.0.0", port=8888, log_level="info")
sys.path.extend(["./"])
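# --- Editor's illustrative sketch (not part of the file above): the assumed
# shape of the `.application` module that `app` is imported from. An ASGI app
# such as FastAPI fits the include_router() and uvicorn.run() calls used here.
from fastapi import FastAPI

app = FastAPI()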
simulator.rs
use crate::configurations::generals::EndTime; use crate::configurations::Configuration; use crate::entities::entity_type::Instantiable; use crate::ressources::clock; use crate::ressources::eventsmanagement::EventsManager; use crate::ressources::generals::MapBbox; use crate::ressources::lane_graph::LaneGraph; use crate::ressources::random::Random; use crate::simulation::dispatchers::add_ending_systems; use crate::simulation::dispatchers::add_starting_systems; use crate::simulation::dispatchers::make_render_dispatcher; use glutin_window::GlutinWindow as Window; use opengl_graphics::GlGraphics; use piston::event_loop::{EventSettings, Events}; use piston_window::OpenGL; use piston_window::RenderEvent; use piston_window::WindowSettings; use rts_logger::LogWriterManager; use specs::prelude::{DispatcherBuilder, World}; use specs::Dispatcher; use std::collections::HashMap; use uuid::Uuid; use crate::ressources::random_speed::RandomSpeed; //use std::process::Command; pub struct UseDebugger(pub bool); impl Default for UseDebugger { fn default() -> Self { Self(false) } } pub struct Simulation<'a, 'b> { world: World, base_dispatcher: Dispatcher<'a, 'b>, rendering: (bool, Dispatcher<'a, 'b>), window: Option<Window>, loggers: Option<LogWriterManager>, } impl<'a, 'b> Simulation<'a, 'b> { const OPENGL_VERSION: OpenGL = OpenGL::V3_2; pub fn from_config(config: Configuration) -> Self { let loggers = match &config.generals.logging { Some(logging) => Some(logging.get_manager()), None => None, }; let mut base_dispatcher_builder = DispatcherBuilder::new(); let mut world = World::new(); let mut system_mapping = HashMap::<String, Vec<String>>::new(); let is_rendering_on: bool = config.generals.debugger.on; let width: f64 = config.generals.debugger.width; let height: f64 = config.generals.debugger.height; let window = if is_rendering_on { Some(Self::create_window(width, height)) } else { None }; Self::create_ressources(&mut world, &config); config.systems.declare_systems(&mut system_mapping); add_starting_systems(&mut base_dispatcher_builder); config .systems .setup_systems(&mut base_dispatcher_builder, &system_mapping); add_ending_systems(&mut base_dispatcher_builder); let mut base_dispatcher = base_dispatcher_builder.build(); base_dispatcher.setup(&mut world.res); let rendering = if is_rendering_on { let mut render_dispatcher = make_render_dispatcher(); render_dispatcher.setup(&mut world.res); (is_rendering_on, render_dispatcher) } else { (is_rendering_on, DispatcherBuilder::new().build()) }; world.add_resource(UseDebugger(is_rendering_on)); //entities if let Some(entities) = config.entities { for entity in entities.iter() { entity.create(&mut world, is_rendering_on); } } Self { world, base_dispatcher, window, rendering, loggers, } } pub fn run_simulation(&mut self) { let mut events = Events::new(EventSettings::new()); let is_render_on = self.rendering.0; let mut is_simulation_running = true; while should_keep_going(is_render_on, is_simulation_running) { if !simulation_ended(&self.world) { is_simulation_running = true; self.base_dispatcher.dispatch(&self.world.res); self.world.maintain(); } else { is_simulation_running = false; } if is_render_on { if let Some(ref mut window) = &mut self.window { if let Some(e) = events.next(&mut *window) { if let Some(r) = e.render_args() { self.world.add_resource(r); self.rendering.1.dispatch(&self.world.res); self.world.maintain(); } } }; } } println!("Showing results log..."); } fn create_window(width: f64, height: f64) -> Window { WindowSettings::new("Simumo - Visual 
debugger", [width, height]) .opengl(Self::OPENGL_VERSION) .exit_on_esc(true) .build() .unwrap() } // ///Create default world's ressources and config's ressources fn
(world: &mut World, config: &Configuration) { let end_time = config.generals.end_time.clone(); let seed = if !config.generals.seed.is_empty() { Uuid::parse_str(&config.generals.seed).unwrap_or_else(|_| panic!("invalid seed format")) } else { Uuid::new_v4() }; let random = Random::from_uuid(&seed); let (lane_graph, bbox): (LaneGraph, MapBbox) = config.map.forward_ressources(); if config.generals.debugger.on { let graphics_handle = GlGraphics::new(Self::OPENGL_VERSION); let debugger = config.generals.debugger.clone(); world.add_resource(graphics_handle); world.add_resource(debugger); world.add_resource(bbox); } world.add_resource(lane_graph); world.add_resource(end_time); world.add_resource(clock::Clock::new(config.generals.clock_dt)); world.add_resource(EventsManager::new()); world.add_resource(random); if let Some(random_speed) = config.generals.random_speed { world.add_resource(RandomSpeed(random_speed)); } else { world.add_resource(RandomSpeed(false)); } } } fn should_keep_going(_is_render_on: bool, is_simulation_running: bool) -> bool { // note :: this is commented out because of the python bug // basically we can't Ctrl+C the process while OpenGL + python are running at the same time // todo :: find a fix for the problem above //if is_render_on { // return true; //} is_simulation_running } fn simulation_ended(ressources: &World) -> bool { // if keyboard end event + let clock = ressources.read_resource::<clock::Clock>(); let end_time = ressources.read_resource::<EndTime>(); clock.get_time() >= end_time.val }
create_ressources
thermal_sensor.go
// Copyright 2016 The Periph Authors. All rights reserved. // Use of this source code is governed under the Apache License, Version 2.0 // that can be found in the LICENSE file. package sysfs import ( "errors" "fmt" "os" "path/filepath" "sort" "strconv" "sync" "time" "periph.io/x/periph" "periph.io/x/periph/conn/physic" ) // ThermalSensors is all the sensors discovered on this host via sysfs. var ThermalSensors []*ThermalSensor // ThermalSensorByName returns a *ThermalSensor for the sensor name, if any. func ThermalSensorByName(name string) (*ThermalSensor, error) { // TODO(maruel): Use a bisect or a map. For now we don't expect more than a // handful of thermal sensors so it doesn't matter. for _, t := range ThermalSensors { if t.name == name { if err := t.open(); err != nil { return nil, err } return t, nil } } return nil, errors.New("sysfs-thermal: invalid sensor name") } // ThermalSensor represents one thermal sensor on the system. type ThermalSensor struct { name string root string mu sync.Mutex nameType string f fileIO precision physic.Temperature } func (t *ThermalSensor) String() string { return t.name } // Halt implements conn.Resource. It is a noop. func (t *ThermalSensor) Halt() error { return nil } // Type returns the type of sensor as exported by sysfs. func (t *ThermalSensor) Type() string { t.mu.Lock() defer t.mu.Unlock() if t.nameType == "" { f, err := fileIOOpen(t.root+"type", os.O_RDONLY) if err != nil { return fmt.Sprintf("sysfs-thermal: %v", err) } defer f.Close() var buf [256]byte n, err := f.Read(buf[:]) if err != nil { return fmt.Sprintf("sysfs-thermal: %v", err) } if n < 2 { t.nameType = "<unknown>" } else { t.nameType = string(buf[:n-1]) } } return t.nameType } // Sense implements physic.SenseEnv. func (t *ThermalSensor) Sense(e *physic.Env) error { if err := t.open(); err != nil { return err } t.mu.Lock() defer t.mu.Unlock() var buf [24]byte n, err := seekRead(t.f, buf[:]) if err != nil { return fmt.Errorf("sysfs-thermal: %v", err) } if n < 2 { return errors.New("sysfs-thermal: failed to read temperature") } i, err := strconv.Atoi(string(buf[:n-1])) if err != nil { return fmt.Errorf("sysfs-thermal: %v", err) } if t.precision == 0 { t.precision = physic.MilliKelvin if i < 100 { t.precision *= 1000 } } e.Temperature = physic.Temperature(i)*t.precision + physic.ZeroCelsius return nil } // SenseContinuous implements physic.SenseEnv. func (t *ThermalSensor) SenseContinuous(interval time.Duration) (<-chan physic.Env, error) { // TODO(maruel): Manually poll in a loop via time.NewTicker. return nil, errors.New("sysfs-thermal: not implemented") } // Precision implements physic.SenseEnv. func (t *ThermalSensor) Precision(e *physic.Env) { if t.precision == 0 { dummy := physic.Env{} // Ignore the error. _ = t.Sense(&dummy) } t.mu.Lock() defer t.mu.Unlock() e.Temperature = t.precision } // func (t *ThermalSensor) open() error { t.mu.Lock() defer t.mu.Unlock() if t.f != nil { return nil } f, err := fileIOOpen(t.root+"temp", os.O_RDONLY) if err != nil { return fmt.Errorf("sysfs-thermal: %v", err) } t.f = f return nil } // driverThermalSensor implements periph.Driver. type driverThermalSensor struct { } func (d *driverThermalSensor) String() string { return "sysfs-thermal" } func (d *driverThermalSensor) Prerequisites() []string { return nil } func (d *driverThermalSensor) After() []string { return nil } // Init initializes thermal sysfs handling code. 
// // Uses sysfs as described* at // https://www.kernel.org/doc/Documentation/thermal/sysfs-api.txt // // * for the most minimalistic meaning of 'described'. func (d *driverThermalSensor) Init() (bool, error) { // This driver is only registered on linux, so there is no legitimate time to // skip it. items, err := filepath.Glob("/sys/class/thermal/*/temp") if err != nil { return true, err } if len(items) == 0 { return false, errors.New("sysfs-thermal: no sensor found") } sort.Strings(items) for _, item := range items { base := filepath.Dir(item) ThermalSensors = append(ThermalSensors, &ThermalSensor{ name: filepath.Base(base), root: base + "/", }) } return true, nil } func
() { if isLinux { periph.MustRegister(&drvThermalSensor) } } var drvThermalSensor driverThermalSensor var _ physic.SenseEnv = &ThermalSensor{} var _ fmt.Stringer = &ThermalSensor{}
init
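// --- Editor's illustrative sketch (not part of the file above): reading a
// temperature through the API defined in this file. "thermal_zone0" is the
// usual sysfs zone name but is an assumption here; host.Init() registers the
// sysfs-thermal driver before use.
package main

import (
	"fmt"
	"log"

	"periph.io/x/periph/conn/physic"
	"periph.io/x/periph/host"
	"periph.io/x/periph/host/sysfs"
)

func main() {
	if _, err := host.Init(); err != nil {
		log.Fatal(err)
	}
	t, err := sysfs.ThermalSensorByName("thermal_zone0")
	if err != nil {
		log.Fatal(err)
	}
	var e physic.Env
	if err := t.Sense(&e); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %s\n", t, e.Temperature)
}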
test_calc_class.py
import pytest from calc_class import Calculator # Constants NUMBER_1 = 3.0 NUMBER_2 = 2.0 # Fixtures @pytest.fixture def calculator(): return Calculator() def verify_answer(expected, answer, last_answer): assert expected == answer assert expected == last_answer # ====== Test Cases start ====== def test_last_answer_init(calculator): # TODO : Test the initial last_answer value def test_add(calculator): # TODO: Test using NUMBER_1, NUMBER_2 def test_subtract(calculator): # TODO: Test using NUMBER_1, NUMBER_2 def test_subtract_negative(calculator): # TODO: Test using NUMBER_1, NUMBER_2 def test_multiply(calculator): # TODO: Test using NUMBER_1, NUMBER_2 def test_divide(calculator): # TODO: Test using NUMBER_1, NUMBER_2 def test_divide_by_zero(calculator): # TODO : Test that ZeroDivisionError is raised @pytest.mark.parametrize("a,b,expected", [ (NUMBER_1, NUMBER_2, NUMBER_1), (NUMBER_2, NUMBER_1, NUMBER_1), (NUMBER_1, NUMBER_1, NUMBER_1), ]) def test_maximum(calculator, a, b, expected): # TODO : Use parametrize
parameters @pytest.mark.parametrize("a,b,expected", [ (NUMBER_1, NUMBER_2, NUMBER_2), (NUMBER_2, NUMBER_1, NUMBER_2), (NUMBER_2, NUMBER_2, NUMBER_2), ]) def test_minimum(calculator, a, b, expected): # TODO : Use parametrize to inject the parameters
to inject the
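# --- Editor's illustrative sketch (not part of the file above): one way the
# parametrized maximum test could be filled in. Calculator.maximum() and
# Calculator.last_answer are assumptions about the class under test, not
# taken from the original file.
@pytest.mark.parametrize("a,b,expected", [(NUMBER_1, NUMBER_2, NUMBER_1)])
def test_maximum_example(calculator, a, b, expected):
    answer = calculator.maximum(a, b)
    verify_answer(expected, answer, calculator.last_answer)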
json.rs
//! Json extractor/responder use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use std::{fmt, ops}; use bytes::BytesMut; use futures_util::future::{err, ok, FutureExt, LocalBoxFuture, Ready}; use futures_util::StreamExt; use serde::de::DeserializeOwned; use serde::Serialize; use actix_http::http::{header::CONTENT_LENGTH, StatusCode}; use actix_http::{HttpMessage, Payload, Response}; #[cfg(feature = "compress")] use crate::dev::Decompress; use crate::error::{Error, JsonPayloadError}; use crate::extract::FromRequest; use crate::request::HttpRequest; use crate::responder::Responder; /// Json helper /// /// Json can be used for two different purpose. First is for json response /// generation and second is for extracting typed information from request's /// payload. /// /// To extract typed information from request's body, the type `T` must /// implement the `Deserialize` trait from *serde*. /// /// [**JsonConfig**](struct.JsonConfig.html) allows to configure extraction /// process. /// /// ## Example /// /// ```rust /// use actix_web::{web, App}; /// use serde_derive::Deserialize; /// /// #[derive(Deserialize)] /// struct Info { /// username: String, /// } /// /// /// deserialize `Info` from request's body /// async fn index(info: web::Json<Info>) -> String { /// format!("Welcome {}!", info.username) /// } /// /// fn main() { /// let app = App::new().service( /// web::resource("/index.html").route( /// web::post().to(index)) /// ); /// } /// ``` /// /// The `Json` type allows you to respond with well-formed JSON data: simply /// return a value of type Json<T> where T is the type of a structure /// to serialize into *JSON*. The type `T` must implement the `Serialize` /// trait from *serde*. /// /// ```rust /// use actix_web::*; /// use serde_derive::Serialize; /// /// #[derive(Serialize)] /// struct MyObj { /// name: String, /// } /// /// fn index(req: HttpRequest) -> Result<web::Json<MyObj>> { /// Ok(web::Json(MyObj { /// name: req.match_info().get("name").unwrap().to_string(), /// })) /// } /// # fn main() {} /// ``` pub struct Json<T>(pub T); impl<T> Json<T> { /// Deconstruct to an inner value pub fn into_inner(self) -> T { self.0 } } impl<T> ops::Deref for Json<T> { type Target = T; fn deref(&self) -> &T { &self.0 } } impl<T> ops::DerefMut for Json<T> { fn deref_mut(&mut self) -> &mut T { &mut self.0 } } impl<T> fmt::Debug for Json<T> where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Json: {:?}", self.0) } } impl<T> fmt::Display for Json<T> where T: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.0, f) } } impl<T: Serialize> Responder for Json<T> { type Error = Error; type Future = Ready<Result<Response, Error>>; fn respond_to(self, _: &HttpRequest) -> Self::Future { let body = match serde_json::to_string(&self.0) { Ok(body) => body, Err(e) => return err(e.into()), }; ok(Response::build(StatusCode::OK) .content_type("application/json") .body(body)) } } /// Json extractor. Allow to extract typed information from request's /// payload. /// /// To extract typed information from request's body, the type `T` must /// implement the `Deserialize` trait from *serde*. /// /// [**JsonConfig**](struct.JsonConfig.html) allows to configure extraction /// process. 
/// /// ## Example /// /// ```rust /// use actix_web::{web, App}; /// use serde_derive::Deserialize; /// /// #[derive(Deserialize)] /// struct Info { /// username: String, /// } /// /// /// deserialize `Info` from request's body /// async fn index(info: web::Json<Info>) -> String { /// format!("Welcome {}!", info.username) /// } /// /// fn main() { /// let app = App::new().service( /// web::resource("/index.html").route( /// web::post().to(index)) /// ); /// } /// ``` impl<T> FromRequest for Json<T> where T: DeserializeOwned + 'static, { type Error = Error; type Future = LocalBoxFuture<'static, Result<Self, Error>>; type Config = JsonConfig; #[inline] fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future { let req2 = req.clone(); let (limit, err, ctype) = req .app_data::<Self::Config>() .map(|c| (c.limit, c.ehandler.clone(), c.content_type.clone())) .unwrap_or((32768, None, None)); JsonBody::new(req, payload, ctype) .limit(limit) .map(move |res| match res { Err(e) => { log::debug!( "Failed to deserialize Json from payload. \ Request path: {}", req2.path() ); if let Some(err) = err { Err((*err)(e, &req2)) } else { Err(e.into()) } } Ok(data) => Ok(Json(data)), }) .boxed_local() } } /// Json extractor configuration /// /// # Examples /// /// ```rust /// use actix_web::{error, web, App, FromRequest, HttpRequest, HttpResponse}; /// use serde_derive::Deserialize; /// /// #[derive(Deserialize)] /// struct Info { /// username: String, /// } /// /// /// deserialize `Info` from request's body, max payload size is 4kb /// async fn index(info: web::Json<Info>) -> String { /// format!("Welcome {}!", info.username) /// } /// /// /// Return either a 400 or 415, and include the error message from serde /// /// in the response body /// fn json_error_handler(err: error::JsonPayloadError, _req: &HttpRequest) -> error::Error { /// let detail = err.to_string(); /// let response = match &err { /// error::JsonPayloadError::ContentType => { /// HttpResponse::UnsupportedMediaType().content_type("text/plain").body(detail) /// } /// _ => HttpResponse::BadRequest().content_type("text/plain").body(detail), /// }; /// error::InternalError::from_response(err, response).into() /// } /// /// fn main() { /// let app = App::new().service( /// web::resource("/index.html") /// .app_data( /// // change json extractor configuration /// web::Json::<Info>::configure(|cfg| { /// cfg.limit(4096) /// .content_type(|mime| { // <- accept text/plain content type /// mime.type_() == mime::TEXT && mime.subtype() == mime::PLAIN /// }) /// .error_handler(json_error_handler) // Use our custom error response /// })) /// .route(web::post().to(index)) /// ); /// } /// ``` #[derive(Clone)] pub struct JsonConfig { limit: usize, ehandler: Option<Arc<dyn Fn(JsonPayloadError, &HttpRequest) -> Error + Send + Sync>>, content_type: Option<Arc<dyn Fn(mime::Mime) -> bool + Send + Sync>>, } impl JsonConfig { /// Change max size of payload. 
By default max size is 32Kb pub fn limit(mut self, limit: usize) -> Self { self.limit = limit; self } /// Set custom error handler pub fn error_handler<F>(mut self, f: F) -> Self where F: Fn(JsonPayloadError, &HttpRequest) -> Error + Send + Sync + 'static, { self.ehandler = Some(Arc::new(f)); self } /// Set predicate for allowed content types pub fn content_type<F>(mut self, predicate: F) -> Self where F: Fn(mime::Mime) -> bool + Send + Sync + 'static, { self.content_type = Some(Arc::new(predicate)); self } } impl Default for JsonConfig { fn default() -> Self { JsonConfig { limit: 32768, ehandler: None, content_type: None, } } } /// Request's payload json parser, it resolves to a deserialized `T` value. /// This future could be used with `ServiceRequest` and `ServiceFromRequest`. /// /// Returns error: /// /// * content type is not `application/json` /// (unless specified in [`JsonConfig`](struct.JsonConfig.html)) /// * content length is greater than 256k pub struct
<U> { limit: usize, length: Option<usize>, #[cfg(feature = "compress")] stream: Option<Decompress<Payload>>, #[cfg(not(feature = "compress"))] stream: Option<Payload>, err: Option<JsonPayloadError>, fut: Option<LocalBoxFuture<'static, Result<U, JsonPayloadError>>>, } impl<U> JsonBody<U> where U: DeserializeOwned + 'static, { /// Create `JsonBody` for request. pub fn new( req: &HttpRequest, payload: &mut Payload, ctype: Option<Arc<dyn Fn(mime::Mime) -> bool + Send + Sync>>, ) -> Self { // check content-type let json = if let Ok(Some(mime)) = req.mime_type() { mime.subtype() == mime::JSON || mime.suffix() == Some(mime::JSON) || ctype.as_ref().map_or(false, |predicate| predicate(mime)) } else { false }; if !json { return JsonBody { limit: 262_144, length: None, stream: None, fut: None, err: Some(JsonPayloadError::ContentType), }; } let len = req .headers() .get(&CONTENT_LENGTH) .and_then(|l| l.to_str().ok()) .and_then(|s| s.parse::<usize>().ok()); #[cfg(feature = "compress")] let payload = Decompress::from_headers(payload.take(), req.headers()); #[cfg(not(feature = "compress"))] let payload = payload.take(); JsonBody { limit: 262_144, length: len, stream: Some(payload), fut: None, err: None, } } /// Change max size of payload. By default max size is 256Kb pub fn limit(mut self, limit: usize) -> Self { self.limit = limit; self } } impl<U> Future for JsonBody<U> where U: DeserializeOwned + 'static, { type Output = Result<U, JsonPayloadError>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { if let Some(ref mut fut) = self.fut { return Pin::new(fut).poll(cx); } if let Some(err) = self.err.take() { return Poll::Ready(Err(err)); } let limit = self.limit; if let Some(len) = self.length.take() { if len > limit { return Poll::Ready(Err(JsonPayloadError::Overflow)); } } let mut stream = self.stream.take().unwrap(); self.fut = Some( async move { let mut body = BytesMut::with_capacity(8192); while let Some(item) = stream.next().await { let chunk = item?; if (body.len() + chunk.len()) > limit { return Err(JsonPayloadError::Overflow); } else { body.extend_from_slice(&chunk); } } Ok(serde_json::from_slice::<U>(&body)?) 
} .boxed_local(), ); self.poll(cx) } } #[cfg(test)] mod tests { use bytes::Bytes; use serde_derive::{Deserialize, Serialize}; use super::*; use crate::error::InternalError; use crate::http::header; use crate::test::{load_stream, TestRequest}; use crate::HttpResponse; #[derive(Serialize, Deserialize, PartialEq, Debug)] struct MyObject { name: String, } fn json_eq(err: JsonPayloadError, other: JsonPayloadError) -> bool { match err { JsonPayloadError::Overflow => match other { JsonPayloadError::Overflow => true, _ => false, }, JsonPayloadError::ContentType => match other { JsonPayloadError::ContentType => true, _ => false, }, _ => false, } } #[actix_rt::test] async fn test_responder() { let req = TestRequest::default().to_http_request(); let j = Json(MyObject { name: "test".to_string(), }); let resp = j.respond_to(&req).await.unwrap(); assert_eq!(resp.status(), StatusCode::OK); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), header::HeaderValue::from_static("application/json") ); use crate::responder::tests::BodyTest; assert_eq!(resp.body().bin_ref(), b"{\"name\":\"test\"}"); } #[actix_rt::test] async fn test_custom_error_responder() { let (req, mut pl) = TestRequest::default() .header( header::CONTENT_TYPE, header::HeaderValue::from_static("application/json"), ) .header( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), ) .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) .app_data(JsonConfig::default().limit(10).error_handler(|err, _| { let msg = MyObject { name: "invalid request".to_string(), }; let resp = HttpResponse::BadRequest() .body(serde_json::to_string(&msg).unwrap()); InternalError::from_response(err, resp).into() })) .to_http_parts(); let s = Json::<MyObject>::from_request(&req, &mut pl).await; let mut resp = Response::from_error(s.err().unwrap().into()); assert_eq!(resp.status(), StatusCode::BAD_REQUEST); let body = load_stream(resp.take_body()).await.unwrap(); let msg: MyObject = serde_json::from_slice(&body).unwrap(); assert_eq!(msg.name, "invalid request"); } #[actix_rt::test] async fn test_extract() { let (req, mut pl) = TestRequest::default() .header( header::CONTENT_TYPE, header::HeaderValue::from_static("application/json"), ) .header( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), ) .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) .to_http_parts(); let s = Json::<MyObject>::from_request(&req, &mut pl).await.unwrap(); assert_eq!(s.name, "test"); assert_eq!( s.into_inner(), MyObject { name: "test".to_string() } ); let (req, mut pl) = TestRequest::default() .header( header::CONTENT_TYPE, header::HeaderValue::from_static("application/json"), ) .header( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), ) .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) .app_data(JsonConfig::default().limit(10)) .to_http_parts(); let s = Json::<MyObject>::from_request(&req, &mut pl).await; assert!(format!("{}", s.err().unwrap()) .contains("Json payload size is bigger than allowed")); let (req, mut pl) = TestRequest::default() .header( header::CONTENT_TYPE, header::HeaderValue::from_static("application/json"), ) .header( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), ) .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) .app_data( JsonConfig::default() .limit(10) .error_handler(|_, _| JsonPayloadError::ContentType.into()), ) .to_http_parts(); let s = Json::<MyObject>::from_request(&req, &mut pl).await; assert!(format!("{}", s.err().unwrap()).contains("Content type error")); } 
#[actix_rt::test] async fn test_json_body() { let (req, mut pl) = TestRequest::default().to_http_parts(); let json = JsonBody::<MyObject>::new(&req, &mut pl, None).await; assert!(json_eq(json.err().unwrap(), JsonPayloadError::ContentType)); let (req, mut pl) = TestRequest::default() .header( header::CONTENT_TYPE, header::HeaderValue::from_static("application/text"), ) .to_http_parts(); let json = JsonBody::<MyObject>::new(&req, &mut pl, None).await; assert!(json_eq(json.err().unwrap(), JsonPayloadError::ContentType)); let (req, mut pl) = TestRequest::default() .header( header::CONTENT_TYPE, header::HeaderValue::from_static("application/json"), ) .header( header::CONTENT_LENGTH, header::HeaderValue::from_static("10000"), ) .to_http_parts(); let json = JsonBody::<MyObject>::new(&req, &mut pl, None) .limit(100) .await; assert!(json_eq(json.err().unwrap(), JsonPayloadError::Overflow)); let (req, mut pl) = TestRequest::default() .header( header::CONTENT_TYPE, header::HeaderValue::from_static("application/json"), ) .header( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), ) .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) .to_http_parts(); let json = JsonBody::<MyObject>::new(&req, &mut pl, None).await; assert_eq!( json.ok().unwrap(), MyObject { name: "test".to_owned() } ); } #[actix_rt::test] async fn test_with_json_and_bad_content_type() { let (req, mut pl) = TestRequest::with_header( header::CONTENT_TYPE, header::HeaderValue::from_static("text/plain"), ) .header( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), ) .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) .app_data(JsonConfig::default().limit(4096)) .to_http_parts(); let s = Json::<MyObject>::from_request(&req, &mut pl).await; assert!(s.is_err()) } #[actix_rt::test] async fn test_with_json_and_good_custom_content_type() { let (req, mut pl) = TestRequest::with_header( header::CONTENT_TYPE, header::HeaderValue::from_static("text/plain"), ) .header( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), ) .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) .app_data(JsonConfig::default().content_type(|mime: mime::Mime| { mime.type_() == mime::TEXT && mime.subtype() == mime::PLAIN })) .to_http_parts(); let s = Json::<MyObject>::from_request(&req, &mut pl).await; assert!(s.is_ok()) } #[actix_rt::test] async fn test_with_json_and_bad_custom_content_type() { let (req, mut pl) = TestRequest::with_header( header::CONTENT_TYPE, header::HeaderValue::from_static("text/html"), ) .header( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), ) .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) .app_data(JsonConfig::default().content_type(|mime: mime::Mime| { mime.type_() == mime::TEXT && mime.subtype() == mime::PLAIN })) .to_http_parts(); let s = Json::<MyObject>::from_request(&req, &mut pl).await; assert!(s.is_err()) } }
JsonBody
hello_grpc.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 // protoc v3.19.3 // source: hello_grpc.proto package hello_grpc import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) type Req struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` } func (x *Req) Reset() { *x = Req{} if protoimpl.UnsafeEnabled { mi := &file_hello_grpc_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Req) String() string { return protoimpl.X.MessageStringOf(x) } func (*Req) ProtoMessage() {} func (x *Req) ProtoReflect() protoreflect.Message { mi := &file_hello_grpc_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Req.ProtoReflect.Descriptor instead. func (*Req) Descriptor() ([]byte, []int) { return file_hello_grpc_proto_rawDescGZIP(), []int{0} } func (x *Req) GetMessage() string { if x != nil { return x.Message } return "" } type Res struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` } func (x *Res) Reset() { *x = Res{} if protoimpl.UnsafeEnabled { mi := &file_hello_grpc_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Res) String() string { return protoimpl.X.MessageStringOf(x) } func (*Res) ProtoMessage() {} func (x *Res) ProtoReflect() protoreflect.Message { mi := &file_hello_grpc_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Res.ProtoReflect.Descriptor instead. 
func (*Res) Descriptor() ([]byte, []int) { return file_hello_grpc_proto_rawDescGZIP(), []int{1} } func (x *Res) GetMessage() string { if x != nil { return x.Message } return "" } var File_hello_grpc_proto protoreflect.FileDescriptor var file_hello_grpc_proto_rawDesc = []byte{ 0x0a, 0x10, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x22, 0x1f, 0x0a, 0x03, 0x52, 0x65, 0x71, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1f, 0x0a, 0x03, 0x52, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x36, 0x0a, 0x09, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x47, 0x52, 0x50, 0x43, 0x12, 0x29, 0x0a, 0x05, 0x53, 0x61, 0x79, 0x48, 0x69, 0x12, 0x0f, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x42, 0x0f, 0x5a, 0x0d, 0x2e, 0x2f, 0x3b, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_hello_grpc_proto_rawDescOnce sync.Once file_hello_grpc_proto_rawDescData = file_hello_grpc_proto_rawDesc ) func file_hello_grpc_proto_rawDescGZIP() []byte { file_hello_grpc_proto_rawDescOnce.Do(func() { file_hello_grpc_proto_rawDescData = protoimpl.X.CompressGZIP(file_hello_grpc_proto_rawDescData) }) return file_hello_grpc_proto_rawDescData } var file_hello_grpc_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_hello_grpc_proto_goTypes = []interface{}{ (*Req)(nil), // 0: hello_grpc.Req (*Res)(nil), // 1: hello_grpc.Res } var file_hello_grpc_proto_depIdxs = []int32{ 0, // 0: hello_grpc.HelloGRPC.SayHi:input_type -> hello_grpc.Req 1, // 1: hello_grpc.HelloGRPC.SayHi:output_type -> hello_grpc.Res 1, // [1:2] is the sub-list for method output_type 0, // [0:1] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name } func init() { file_hello_grpc_proto_init() } func file_hello_grpc_proto_init()
{ if File_hello_grpc_proto != nil { return } if !protoimpl.UnsafeEnabled { file_hello_grpc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Req); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_hello_grpc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Res); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_hello_grpc_proto_rawDesc, NumEnums: 0, NumMessages: 2, NumExtensions: 0, NumServices: 1, }, GoTypes: file_hello_grpc_proto_goTypes, DependencyIndexes: file_hello_grpc_proto_depIdxs, MessageInfos: file_hello_grpc_proto_msgTypes, }.Build() File_hello_grpc_proto = out.File file_hello_grpc_proto_rawDesc = nil file_hello_grpc_proto_goTypes = nil file_hello_grpc_proto_depIdxs = nil }
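// --- Editor's illustrative sketch (not part of the generated file above):
// constructing the generated message types from the same package. The
// message text is arbitrary.
func exampleMessages() {
	req := &Req{Message: "hello"}
	res := &Res{Message: req.GetMessage()}
	_ = res
}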
race_task.py
from opensoar.task.task import Task from opensoar.utilities.helper_functions import calculate_distance, double_iterator, \ seconds_time_difference_fixes, add_seconds class RaceTask(Task):
""" Race task. """ def __init__(self, waypoints, timezone=None, start_opening=None, start_time_buffer=0, multistart=False): """ :param waypoints: see super() :param timezone: see super() :param start_opening: see super() :param start_time_buffer: see super() :param multistart: see super() """ super().__init__(waypoints, timezone, start_opening, start_time_buffer, multistart) self.distances = self.calculate_task_distances() def __eq__(self, other): return super().__eq__(other) @property def total_distance(self): return sum(self.distances) def calculate_task_distances(self): distances = list() for leg in range(self.no_legs): begin = self.waypoints[leg] end = self.waypoints[leg+1] # next is built in name distance = calculate_distance(begin.fix, end.fix) if begin.distance_correction == "shorten_legs": if end.distance_correction == "shorten_legs": distance = Task.distance_shortened_leg(distance, begin, end, "current") distance = Task.distance_shortened_leg(distance, begin, end, "end") elif end.distance_correction == "move_tp": distance = Task.distance_moved_turnpoint(distance, begin, end, "end") distance = Task.distance_shortened_leg(distance, begin, end, "current") elif end.distance_correction is None: distance = Task.distance_shortened_leg(distance, begin, end, "current") else: raise ValueError("This distance correction does not exist: %s" % end.distance_correction) elif begin.distance_correction == "move_tp": if end.distance_correction == "shorten_legs": distance = Task.distance_moved_turnpoint(distance, begin, end, "begin") distance = Task.distance_shortened_leg(distance, begin, end, "end") elif end.distance_correction == "move_tp": distance = Task.distance_moved_turnpoint(distance, begin, end, "begin") distance = Task.distance_moved_turnpoint(distance, begin, end, "both_end") elif end.distance_correction is None: distance = Task.distance_moved_turnpoint(distance, begin, end, "begin") else: raise ValueError("This distance correction does not exist: %s" % end.distance_correction) elif begin.distance_correction is None: if end.distance_correction == "shorten_legs": distance = Task.distance_shortened_leg(distance, begin, end, "end") elif end.distance_correction == "move_tp": distance = Task.distance_moved_turnpoint(distance, begin, end, "end") elif end.distance_correction is None: pass else: raise ValueError("This distance correction does not exist: %s" % end.distance_correction) else: raise ValueError("This distance correction does not exist: %s" % self.waypoints[leg].distance_correction) distances.append(distance) return distances def apply_rules(self, trace): fixes, outlanding_fix = self.determine_trip_fixes(trace) if len(fixes) > 0: distances = self.determine_trip_distances(fixes, outlanding_fix) refined_start = self.determine_refined_start(trace, fixes) finish_time = fixes[-1]['time'] return fixes, refined_start, outlanding_fix, distances, finish_time return [], None, None, [0, 0], None def determine_trip_fixes(self, trace): leg = -1 enl_first_fix = None enl_registered = False fixes = list() start_fixes = list() for fix_minus1, fix in double_iterator(trace): if not enl_registered and self.enl_value_exceeded(fix): if enl_first_fix is None: enl_first_fix = fix_minus1 enl_time = seconds_time_difference_fixes(enl_first_fix, fix) enl_registered = enl_registered or self.enl_time_exceeded(enl_time) elif not enl_registered: enl_first_fix = None if self.start_opening is None: after_start_opening = True else: after_start_opening = add_seconds(fix['time'], self.start_time_buffer) > 
self.start_opening if leg == -1 and after_start_opening: if self.started(fix_minus1, fix): fixes.append(fix_minus1) start_fixes.append(fix_minus1) leg += 1 enl_first_fix = None enl_registered = False elif leg == 0: if self.started(fix_minus1, fix): # restart fixes[0] = fix_minus1 start_fixes.append(fix_minus1) enl_first_fix = None enl_registered = False if self.finished_leg(leg, fix_minus1, fix) and not enl_registered: fixes.append(fix) leg += 1 elif 0 < leg < self.no_legs: if self.finished_leg(leg, fix_minus1, fix) and not enl_registered: fixes.append(fix) leg += 1 enl_fix = enl_first_fix if enl_registered else None outlanding_fix = None if len(fixes) > 0 and len(fixes) != len(self.waypoints): outlanding_fix = self.determine_outlanding_fix(trace, fixes, start_fixes, enl_fix) return fixes, outlanding_fix def determine_outlanding_fix(self, trace, fixes, start_fixes, enl_fix): outlanding_leg = len(fixes) - 1 # check if there is an actual outlanding if len(fixes) == len(self.waypoints): return None # determine range within trace to be examined for outlanding fix last_tp_i = trace.index(fixes[-1]) if outlanding_leg != 0 else trace.index(start_fixes[0]) if enl_fix is not None: last_index = trace.index(enl_fix) else: last_index = len(trace) - 1 # find fix which maximizes the distance outlanding_fix = max(trace[last_tp_i:last_index + 1], key=lambda x: self.determine_outlanding_distance(outlanding_leg, x)) max_distance = self.determine_outlanding_distance(outlanding_leg, outlanding_fix) if max_distance < 0: # no out-landing fix that improves the distance if enl_fix is not None: outlanding_fix = enl_fix else: outlanding_fix = trace[-1] return outlanding_fix def determine_outlanding_distance(self, outlanding_leg, fix): previous_waypoint = self.waypoints[outlanding_leg] next_waypoint = self.waypoints[outlanding_leg + 1] # outlanding distance = distance between tps minus distance from next tp to outlanding outlanding_dist = calculate_distance(previous_waypoint.fix, next_waypoint.fix) outlanding_dist -= calculate_distance(next_waypoint.fix, fix) return outlanding_dist if outlanding_dist > 0 else 0 def determine_trip_distances(self, fixes, outlanding_fix): distances = list() for leg, fix in enumerate(fixes[1:]): distances.append(self.distances[leg]) if outlanding_fix is not None: outlanding_leg = len(fixes) - 1 distances.append(self.determine_outlanding_distance(outlanding_leg, outlanding_fix)) return distances def finished_leg(self, leg, fix1, fix2): """Determines whether leg is finished.""" next_waypoint = self.waypoints[leg + 1] if next_waypoint.is_line: return next_waypoint.crossed_line(fix1, fix2) else: return next_waypoint.outside_sector(fix1) and next_waypoint.inside_sector(fix2)
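# --- Editor's illustrative sketch (not part of the file above): the
# outlanding-distance rule from determine_outlanding_distance, with made-up
# numbers. A 50 km leg flown to an outlanding 12 km short of the next
# turnpoint scores 50 - 12 = 38 km.
leg_distance = 50.0      # km, previous turnpoint -> next turnpoint
fix_to_next_tp = 12.0    # km, outlanding fix -> next turnpoint
outlanding_dist = leg_distance - fix_to_next_tp
assert outlanding_dist == 38.0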
mut.rs
#[allow(dead_code)] #[derive(Clone, Copy)] struct Book { // `&'static str` is a reference to a string allocated in read only memory
title: &'static str,
    year: u32,
}

// This function takes a reference to a book
fn borrow_book(book: &Book) {
    println!("I immutably borrowed {} - {} edition", book.title, book.year);
}

// This function takes a reference to a mutable book and changes `year` to 2014
fn new_edition(book: &mut Book) {
    book.year = 2014;
    println!("I mutably borrowed {} - {} edition", book.title, book.year);
}

fn main() {
    // Create an immutable Book named `immutabook`
    let immutabook = Book {
        // string literals have type `&'static str`
        author: "Douglas Hofstadter",
        title: "Gödel, Escher, Bach",
        year: 1979,
    };

    // Create a mutable copy of `immutabook` and call it `mutabook`
    let mut mutabook = immutabook;

    // Immutably borrow an immutable object
    borrow_book(&immutabook);

    // Immutably borrow a mutable object
    borrow_book(&mutabook);

    // Borrow a mutable object as mutable
    new_edition(&mut mutabook);

    // Error! Cannot borrow an immutable object as mutable
    new_edition(&mut immutabook);
    // FIXME ^ Comment out this line
}
author: &'static str,
day03part1initial.rs
fn main() { let input = include_str!("../../input/day03.txt"); let topo_map = input .lines() .map(|line| line.chars().collect::<Vec<_>>()) .collect::<Vec<_>>(); let result = traverse(&topo_map, 3, 1); println!("{:?}", result); } fn traverse(tm: &Vec<Vec<char>>, x: usize, y: usize) -> usize { traverse_step(tm, x, y, x, y) } fn traverse_step(tm: &Vec<Vec<char>>, x: usize, y: usize, cx: usize, cy: usize) -> usize { if cy >= tm.len() { return 0; } let cx = cx % tm[0].len(); let hit = if tm[cy][cx] == '#' { 1 } else { 0 };
hit as usize + traverse_step(tm, x, y, cx + x, cy + y) }
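A side note on the recursion above: `traverse_step` recurses once per step down the map, so a very tall input could exhaust the stack. Below is a minimal iterative sketch of the same slope walk, assuming the same `Vec<Vec<char>>` map layout (`traverse_iter` is an illustrative name, not part of the original file):

// Iterative equivalent of `traverse`: advance by (x, y) per step,
// wrap the column index around the map width, and count '#' cells.
fn traverse_iter(tm: &[Vec<char>], x: usize, y: usize) -> usize {
    let width = tm[0].len();
    (1..)
        .map(|step| (step * x % width, step * y)) // position after `step` moves
        .take_while(|&(_, cy)| cy < tm.len())     // stop past the last row
        .filter(|&(cx, cy)| tm[cy][cx] == '#')    // keep only tree hits
        .count()
}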
nonblocking.rs
use rand::distributions::{Distribution, Uniform}; use std::iter::repeat_with; use vmcircbuffer::nonblocking::Circular; #[test] fn create_many() { let mut v = Vec::new(); for _ in 0..100 { v.push(Circular::new::<u8>().unwrap()); } } #[test] fn zero_size() { let mut w = Circular::new::<u8>().unwrap(); assert!(!w.try_slice().is_empty()); } #[test] fn no_reader() { let mut w = Circular::new::<u8>().unwrap(); let s = w.try_slice(); let l = s.len(); w.produce(l); assert!(!w.try_slice().is_empty()); } #[test] #[should_panic] fn produce_too_much() { let mut w = Circular::new::<u8>().unwrap(); let s = w.try_slice(); let l = s.len(); w.produce(l + 1); } #[test] #[should_panic] fn consume_too_much() { let mut w = Circular::new::<u8>().unwrap(); let mut r = w.add_reader(); let s = w.try_slice(); let l = s.len(); w.produce(l + 1); let s = r.try_slice().unwrap(); let l = s.len(); r.consume(l + 1); } #[test] fn
() { let mut w = Circular::new::<u32>().unwrap(); let s = w.try_slice(); for (i, v) in s.iter_mut().take(200).enumerate() { *v = i as u32; } w.produce(100); let mut r = w.add_reader(); assert_eq!(r.try_slice().unwrap().len(), 0); w.produce(100); assert_eq!(r.try_slice().unwrap().len(), 100); for (i, v) in r.try_slice().unwrap().iter().enumerate() { assert_eq!(*v, 100 + i as u32); } } #[test] fn several_readers() { let mut w = Circular::new::<u32>().unwrap(); let mut r1 = w.add_reader(); let mut r2 = w.add_reader(); for (i, v) in w.try_slice().iter_mut().enumerate() { *v = i as u32; } let all = w.try_slice().len(); assert_eq!(r1.try_slice().unwrap().len(), 0); let l = w.try_slice().len(); w.produce(l); assert_eq!(r2.try_slice().unwrap().len(), all); let _ = r1.try_slice(); r1.consume(100); assert_eq!(r1.try_slice().unwrap().len(), all - 100); for (i, v) in r1.try_slice().unwrap().iter().enumerate() { assert_eq!(*v, 100 + i as u32); } } #[test] fn fuzz_nonblocking() { let mut w = Circular::new::<u32>().unwrap(); let mut r = w.add_reader(); let size = w.try_slice().len(); let input: Vec<u32> = repeat_with(rand::random::<u32>).take(1231233).collect(); let mut rng = rand::thread_rng(); let n_writes_dist = Uniform::from(0..4); let n_samples_dist = Uniform::from(0..size / 2); let mut w_off = 0; let mut r_off = 0; while r_off < input.len() { let n_writes = n_writes_dist.sample(&mut rng); for _ in 0..n_writes { let s = w.try_slice(); let n = std::cmp::min(s.len(), input.len() - w_off); let n = std::cmp::min(n, n_samples_dist.sample(&mut rng)); for (i, v) in s.iter_mut().take(n).enumerate() { *v = input[w_off + i]; } w.produce(n); w_off += n; } let s = r.try_slice().unwrap(); assert_eq!(s.len(), w_off - r_off); for (i, v) in s.iter().enumerate() { assert_eq!(*v, input[r_off + i]); } let l = s.len(); r.consume(l); r_off += l; } } #[test] fn minimal() { let mut w = Circular::new::<u32>().unwrap(); let mut r = w.add_reader(); for v in w.try_slice() { *v = 123; } let l = w.try_slice().len(); w.produce(l); for v in r.try_slice().unwrap() { assert_eq!(*v, 123); } }
late_reader
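The tests above all follow the same writer/reader protocol: write into the slice returned by `try_slice`, commit with `produce`, then read the reader's slice and release it with `consume`. A minimal end-to-end sketch of that protocol, using only calls that already appear in the tests:

use vmcircbuffer::nonblocking::Circular;

fn main() {
    let mut w = Circular::new::<u32>().unwrap();
    let mut r = w.add_reader();

    // Writer side: fill part of the writable region, then commit 4 samples.
    let s = w.try_slice();
    for (i, v) in s.iter_mut().take(4).enumerate() {
        *v = i as u32;
    }
    w.produce(4);

    // Reader side: the committed samples become visible, then are released.
    let s = r.try_slice().unwrap();
    assert_eq!(s, &[0, 1, 2, 3][..]);
    r.consume(4);
}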
eval_fingerprint.py
import unittest import logging from footprint.models import Audio from footprint.models import Project import footprint.clients as db import os import random import footprint.tokenizers as tokenizers import footprint.evaluators as evaluators import librosa class TestEvalFingerprintDummy(unittest.TestCase): ''' Tests fingerprint evaluator with dummy database ''' def test_smoke(self): random.seed(30) filename = '/dataset/YTCdataset/letitbe/test.mp3' bucket_name = 'test' p = load_project(cache=True) set_dummy_connection(p) evaluator = evaluators.Fingerprint(p) entries = abs_path('fixtures/fgpt_entries.txt') #queries_txt = abs_path('fixtures/queries.txt') queries_path = abs_path('/cache/queries') evaluator.build(entries) queries_list_path = '/cache/queries.txt' expect_path = '/cache/expect.txt' evaluator.generate_queries(entries, 5, duration=40, queries_path=queries_path, queries_list_path=queries_list_path, results=expect_path) evaluator.match(queries_list_path) result = evaluator.result() r = compare_results(result, expect_path) self.assertEqual(r, 1) # import code; code.interact(local=dict(globals(), **locals())) def compare_results(result, expectation_path): file = open(expectation_path, 'r') fil = file.read() expected = dict([x.split('\t') for x in fil.split('\n')]) file.close() comparisons = [expected[query]==found for query, found in result] return sum(comparisons)/len(comparisons) def
(path): dirname = os.path.dirname(os.path.abspath(__file__)) return os.path.join(dirname, path) def naive_tokenizer_for_chroma(audio): return tokenizers.naive_tokenizer(audio.features['chroma_cens'], pace=30) def feat_chroma_cens(audio): print('running chroma for ', audio.filename) return librosa.feature.chroma_cens(audio.y, audio.sr) def set_dummy_connection(p): cli = db.dummy.Connection(**{'var1': True}) p.set_connection(cli) def load_project(cache=True): p = Project(cache=cache, cache_folder='/cache') p.process_feature('chroma_cens', feat_chroma_cens) p.use_tokenizer('chroma_naive', naive_tokenizer_for_chroma) return p if __name__ == '__main__': unittest.main() # import code; code.interact(local=dict(globals(), **locals())) # python3 -m unittest test.eval_fingerprint.TestEvalFingerprintDummy.test_smoke
abs_path
functions.rs
//! This module defines types and structures for fuzzy logic functions.
//!
//! Module contains implementation of membership functions and defuzzification functions.
//! Also contains factory methods to create most used functions.
use crate::set::Set;
use num::abs;

/// Used to calculate the membership of the given item.
/// All membership functions must be this type.
pub type MembershipFunction = dyn Fn(f32) -> f32;

/// Used to defuzzificate the fuzzy logic inference result.
/// All defuzzification functions must be this type.
pub type DefuzzFunc = dyn Fn(&Set) -> f32;

/// Defines methods to create most used membership functions.
///
/// # Examples
/// Create triangular function:
///
/// ```rust
/// use fuzzy_logic::functions::MembershipFactory;
///
/// let mem = MembershipFactory::triangular(-15.0, -15.0, 22.0);
/// mem(-15.0); // -> 1.0
/// ```
pub struct MembershipFactory;

impl MembershipFactory {
    /// Creates triangular function with left foot `a`, peak `b` and right foot `c`.
    pub fn triangular(a: f32, b: f32, c: f32) -> Box<MembershipFunction> {
        Box::new(move |x: f32| {
            if a <= x && x <= b {
                1.0 - (b - x) / (b - a)
            } else if b <= x && x <= c {
                1.0 - (x - b) / (c - b)
            } else {
                0.0
            }
        })
    }
    /// Creates trapezoidal function.
    pub fn trapezoidal(a: f32, b: f32, c: f32, d: f32) -> Box<MembershipFunction> {
        Box::new(move |x: f32| {
            if x < a {
                0.0
            } else if x <= b {
                (x - a) / (b - a)
            } else if x <= c {
                1.0
            } else if x <= d {
                (d - x) / (d - c)
            } else {
                0.0
            }
        })
    }
    /// Creates sigmoidal function.
    pub fn sigmoidal(a: f32, c: f32) -> Box<MembershipFunction> {
        Box::new(move |x: f32| 1.0 / (1.0 + (-1.0 * a * (x - c)).exp()))
    }
    /// Creates gaussian function.
    pub fn gaussian(a: f32, b: f32, c: f32) -> Box<MembershipFunction> {
        Box::new(move |x: f32| a * (-1.0 * ((x - b).powi(2) / (2.0 * c.powi(2)))).exp())
    }
    /// Creates a singleton function.
    /// Returns 1.0 (part of the set) when `x` is within a small epsilon of `value`,
    /// and 0.0 (not part of the set) otherwise.
    pub fn
(value: f32) -> Box<MembershipFunction> {
        // Exact `f32` equality is fragile, so membership is decided within
        // a small epsilon around `value`.
        let eps = 0.01;
        // The closure captures `value` and `eps`; `x` is supplied when invoked.
        Box::new(move |x: f32| if abs(value - x) < eps { 1.0 } else { 0.0 })
    }
}

/// Defines methods to create most used defuzzification functions.
///
/// # Examples
/// Create function which calculates center of mass:
///
/// ```rust
/// use fuzzy_logic::{
///     functions::{DefuzzFactory, MembershipFactory},
///     set::Set,
/// };
///
/// let mem = MembershipFactory::triangular(-15.0, -15.0, 22.0);
/// let df = DefuzzFactory::center_of_mass();
/// let set = Set::new_with_mem("Test".to_string(), mem);
/// df(&set);
/// ```
pub struct DefuzzFactory;

impl DefuzzFactory {
    /// Creates function which calculates center of mass.
    pub fn center_of_mass() -> Box<DefuzzFunc> {
        Box::new(|s: &Set| {
            let sum = s.cache.borrow().iter().fold(0.0, |acc, (_, &v)| acc + v);
            let prod_sum = s
                .cache
                .borrow()
                .iter()
                .fold(0.0, |acc, (&k, &v)| acc + k.into_inner() * v);
            prod_sum / sum
        })
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use std::f32;

    #[test]
    fn sigmoidal() {
        let steepness = 2.0;
        for i in -100..100 {
            let midpoint = i as f32;
            let f = MembershipFactory::sigmoidal(steepness, midpoint);
            let diff = (0.5 - f(midpoint)).abs();
            assert!(diff <= f32::EPSILON);
        }
    }
}
singleton
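As a quick illustration of the factories defined above (a standalone sketch, not part of the crate): evaluating a trapezoidal membership function at a few points.

use fuzzy_logic::functions::MembershipFactory;

fn main() {
    // Trapezoid rising on [0, 1], flat on [1, 3], falling on [3, 4].
    let mem = MembershipFactory::trapezoidal(0.0, 1.0, 3.0, 4.0);
    assert_eq!(mem(0.5), 0.5); // on the rising edge
    assert_eq!(mem(2.0), 1.0); // on the plateau
    assert_eq!(mem(3.5), 0.5); // on the falling edge
    assert_eq!(mem(9.0), 0.0); // outside the support
}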
applicationservice.go
/* Copyright (c) 2015, Alcatel-Lucent Inc All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package vspk import "github.com/nuagenetworks/go-bambou/bambou" // ApplicationServiceIdentity represents the Identity of the object var ApplicationServiceIdentity = bambou.Identity{ Name: "applicationservice", Category: "applicationservices", } // ApplicationServicesList represents a list of ApplicationServices type ApplicationServicesList []*ApplicationService // ApplicationServicesAncestor is the interface of an ancestor of a ApplicationService must implement. type ApplicationServicesAncestor interface { ApplicationServices(*bambou.FetchingInfo) (ApplicationServicesList, *bambou.Error) CreateApplicationServices(*ApplicationService) *bambou.Error } // ApplicationService represents the model of a applicationservice type ApplicationService struct { ID string `json:"ID,omitempty"` ParentID string `json:"parentID,omitempty"` ParentType string `json:"parentType,omitempty"` Owner string `json:"owner,omitempty"` DSCP string `json:"DSCP,omitempty"` Name string `json:"name,omitempty"` LastUpdatedBy string `json:"lastUpdatedBy,omitempty"` Description string `json:"description,omitempty"` DestinationPort string `json:"destinationPort,omitempty"` Direction string `json:"direction,omitempty"` EntityScope string `json:"entityScope,omitempty"` SourcePort string `json:"sourcePort,omitempty"` Protocol string `json:"protocol,omitempty"` EtherType string `json:"etherType,omitempty"` ExternalID string `json:"externalID,omitempty"` } // NewApplicationService returns a new *ApplicationService func NewApplicationService() *ApplicationService { return &ApplicationService{ EtherType: "0x0800", Direction: "REFLEXIVE", Protocol: "6", DSCP: "*", } } // Identity returns the Identity of the object. func (o *ApplicationService) Identity() bambou.Identity { return ApplicationServiceIdentity } // Identifier returns the value of the object's unique identifier. func (o *ApplicationService) Identifier() string { return o.ID } // SetIdentifier sets the value of the object's unique identifier. 
func (o *ApplicationService) SetIdentifier(ID string) { o.ID = ID } // Fetch retrieves the ApplicationService from the server func (o *ApplicationService) Fetch() *bambou.Error { return bambou.CurrentSession().FetchEntity(o) } // Save saves the ApplicationService into the server func (o *ApplicationService) Save() *bambou.Error { return bambou.CurrentSession().SaveEntity(o) } // Delete deletes the ApplicationService from the server func (o *ApplicationService) Delete() *bambou.Error { return bambou.CurrentSession().DeleteEntity(o) } // Metadatas retrieves the list of child Metadatas of the ApplicationService func (o *ApplicationService) Metadatas(info *bambou.FetchingInfo) (MetadatasList, *bambou.Error) { var list MetadatasList err := bambou.CurrentSession().FetchChildren(o, MetadataIdentity, &list, info) return list, err } // CreateMetadata creates a new child Metadata under the ApplicationService func (o *ApplicationService) CreateMetadata(child *Metadata) *bambou.Error { return bambou.CurrentSession().CreateChild(o, child) } // GlobalMetadatas retrieves the list of child GlobalMetadatas of the ApplicationService func (o *ApplicationService) GlobalMetadatas(info *bambou.FetchingInfo) (GlobalMetadatasList, *bambou.Error) { var list GlobalMetadatasList err := bambou.CurrentSession().FetchChildren(o, GlobalMetadataIdentity, &list, info) return list, err } // CreateGlobalMetadata creates a new child GlobalMetadata under the ApplicationService func (o *ApplicationService) CreateGlobalMetadata(child *GlobalMetadata) *bambou.Error { return bambou.CurrentSession().CreateChild(o, child) } // EventLogs retrieves the list of child EventLogs of the ApplicationService func (o *ApplicationService) EventLogs(info *bambou.FetchingInfo) (EventLogsList, *bambou.Error) { var list EventLogsList err := bambou.CurrentSession().FetchChildren(o, EventLogIdentity, &list, info) return list, err } // CreateEventLog creates a new child EventLog under the ApplicationService func (o *ApplicationService) CreateEventLog(child *EventLog) *bambou.Error { return bambou.CurrentSession().CreateChild(o, child) }
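A sketch of how this generated model is typically driven, assuming an already-started bambou session; the `vspk-go` import path and the helper name are illustrative, and the parent can be any type satisfying ApplicationServicesAncestor:

package example

import (
	"github.com/nuagenetworks/go-bambou/bambou"
	"github.com/nuagenetworks/vspk-go/vspk" // assumed import path for the package above
)

// createHTTPSService sketches typical usage: create an ApplicationService
// under a parent implementing ApplicationServicesAncestor, then persist a
// follow-up change with Save.
func createHTTPSService(parent vspk.ApplicationServicesAncestor) (*vspk.ApplicationService, *bambou.Error) {
	svc := vspk.NewApplicationService() // defaults: EtherType "0x0800", Protocol "6", DSCP "*"
	svc.Name = "https"
	svc.DestinationPort = "443"
	if err := parent.CreateApplicationServices(svc); err != nil {
		return nil, err
	}
	svc.Description = "HTTPS service"
	return svc, svc.Save()
}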
zz_generated.deepcopy.go
//go:build !ignore_autogenerated // +build !ignore_autogenerated /* Copyright 2022 Tommaso Doninelli. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by controller-gen. DO NOT EDIT. package v1alpha1 import ( "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AwsSecret) DeepCopyInto(out *AwsSecret) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsSecret. func (in *AwsSecret) DeepCopy() *AwsSecret { if in == nil { return nil } out := new(AwsSecret) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DnsRecord) DeepCopyInto(out *DnsRecord) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DnsRecord. func (in *DnsRecord) DeepCopy() *DnsRecord { if in == nil { return nil } out := new(DnsRecord) in.DeepCopyInto(out) return out
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *DnsRecord) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DnsRecordList) DeepCopyInto(out *DnsRecordList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DnsRecord, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DnsRecordList. func (in *DnsRecordList) DeepCopy() *DnsRecordList { if in == nil { return nil } out := new(DnsRecordList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *DnsRecordList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DnsRecordSpec) DeepCopyInto(out *DnsRecordSpec) { *out = *in in.Route53Records.DeepCopyInto(&out.Route53Records) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DnsRecordSpec. func (in *DnsRecordSpec) DeepCopy() *DnsRecordSpec { if in == nil { return nil } out := new(DnsRecordSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DnsRecordStatus) DeepCopyInto(out *DnsRecordStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]v1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DnsRecordStatus. func (in *DnsRecordStatus) DeepCopy() *DnsRecordStatus { if in == nil { return nil } out := new(DnsRecordStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Route53Record) DeepCopyInto(out *Route53Record) { *out = *in out.AwsSecrets = in.AwsSecrets if in.ResourceRecords != nil { in, out := &in.ResourceRecords, &out.ResourceRecords *out = make([]string, len(*in)) copy(*out, *in) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route53Record. func (in *Route53Record) DeepCopy() *Route53Record { if in == nil { return nil } out := new(Route53Record) in.DeepCopyInto(out) return out }
}
helpers.go
// (c) 2019-2020, Dijets Desk, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database import ( "encoding/binary" "errors" "fmt" "time" "github.com/lasthyphen/avalanchego/ids" ) var errWrongSize = errors.New("value has unexpected size") const ( // kvPairOverhead is an estimated overhead for a kv pair in a database. kvPairOverhead = 8 // bytes ) func PutID(db KeyValueWriter, key []byte, val ids.ID) error { return db.Put(key, val[:]) } func GetID(db KeyValueReader, key []byte) (ids.ID, error) { b, err := db.Get(key) if err != nil { return ids.ID{}, err } return ids.ToID(b) } func ParseID(b []byte) (ids.ID, error) { return ids.ToID(b) } func PutUInt64(db KeyValueWriter, key []byte, val uint64) error
func GetUInt64(db KeyValueReader, key []byte) (uint64, error) { b, err := db.Get(key) if err != nil { return 0, err } return ParseUInt64(b) } func PackUInt64(val uint64) []byte { bytes := make([]byte, 8) binary.BigEndian.PutUint64(bytes, val) return bytes } func ParseUInt64(b []byte) (uint64, error) { if len(b) != 8 { return 0, errWrongSize } return binary.BigEndian.Uint64(b), nil } func PutUInt32(db KeyValueWriter, key []byte, val uint32) error { b := PackUInt32(val) return db.Put(key, b) } func GetUInt32(db KeyValueReader, key []byte) (uint32, error) { b, err := db.Get(key) if err != nil { return 0, err } return ParseUInt32(b) } func PackUInt32(val uint32) []byte { bytes := make([]byte, 4) binary.BigEndian.PutUint32(bytes, val) return bytes } func ParseUInt32(b []byte) (uint32, error) { if len(b) != 4 { return 0, errWrongSize } return binary.BigEndian.Uint32(b), nil } func PutTimestamp(db KeyValueWriter, key []byte, val time.Time) error { valBytes, err := val.MarshalBinary() if err != nil { return err } return db.Put(key, valBytes) } func GetTimestamp(db KeyValueReader, key []byte) (time.Time, error) { b, err := db.Get(key) if err != nil { return time.Time{}, err } return ParseTimestamp(b) } func ParseTimestamp(b []byte) (time.Time, error) { val := time.Time{} if err := val.UnmarshalBinary(b); err != nil { return time.Time{}, err } return val, nil } func PutBool(db KeyValueWriter, key []byte, b bool) error { if b { return db.Put(key, []byte{1}) } return db.Put(key, []byte{0}) } func GetBool(db KeyValueReader, key []byte) (bool, error) { b, err := db.Get(key) switch { case err != nil: return false, err case len(b) != 1: return false, fmt.Errorf("length should be 1 but is %d", len(b)) case b[0] != 0 && b[0] != 1: return false, fmt.Errorf("should be 0 or 1 but is %v", b[0]) } return b[0] == 1, nil } func Size(db Iteratee) (int, error) { iterator := db.NewIterator() defer iterator.Release() size := 0 for iterator.Next() { size += len(iterator.Key()) + len(iterator.Value()) + kvPairOverhead } return size, iterator.Error() }
{ b := PackUInt64(val) return db.Put(key, b) }
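The Pack/Parse helpers above are symmetric big-endian codecs; a standalone round-trip check using only the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

// Mirrors PackUInt64/ParseUInt64 above: an 8-byte big-endian round trip.
func main() {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, 123456789)
	fmt.Printf("% x\n", b)                  // 00 00 00 00 07 5b cd 15
	fmt.Println(binary.BigEndian.Uint64(b)) // 123456789
}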
endpoint.go
package s3control import ( "fmt" "strings" "github.com/aws/aws-sdk-go/aws" awsarn "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/s3shared" "github.com/aws/aws-sdk-go/internal/s3shared/arn" ) const ( // outpost id header outpostIDHeader = "x-amz-outpost-id" // account id header accountIDHeader = "x-amz-account-id" ) // Used by shapes with members decorated as endpoint ARN. func parseEndpointARN(v string) (arn.Resource, error) { return arn.ParseResource(v, resourceParser) } func resourceParser(a awsarn.ARN) (arn.Resource, error) { resParts := arn.SplitResource(a.Resource) switch resParts[0] { case "outpost": return arn.ParseOutpostARNResource(a, resParts[1:]) default: return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} } } func endpointHandler(req *request.Request) { // For special case "CreateBucket" and "ListRegionalBuckets" operation outpostIDEndpoint, ok := req.Params.(endpointOutpostIDGetter) if ok && outpostIDEndpoint.hasOutpostID() { outpostID, err := outpostIDEndpoint.getOutpostID() if err != nil { req.Error = fmt.Errorf("expected outpost ID to be supported, %v", err) } if len(strings.TrimSpace(outpostID)) == 0 { return } updateRequestOutpostIDEndpoint(req) return } endpoint, ok := req.Params.(endpointARNGetter) if !ok || !endpoint.hasEndpointARN() { return } resource, err := endpoint.getEndpointARN() if err != nil { req.Error = s3shared.NewInvalidARNError(nil, err) return } // Add account-id header for the request if not present. // SDK must always send the x-amz-account-id header for all requests // where an accountId has been extracted from an ARN or the accountId field modeled as a header. if h := req.HTTPRequest.Header.Get(accountIDHeader); len(h) == 0 { req.HTTPRequest.Header.Add(accountIDHeader, resource.GetARN().AccountID) } switch tv := resource.(type) { case arn.OutpostAccessPointARN: // Add outpostID header req.HTTPRequest.Header.Add(outpostIDHeader, tv.OutpostID) // update arnable field to resource value updatedInput, err := endpoint.updateArnableField(tv.AccessPointName) if err != nil { req.Error = err return } // update request params to use modified ARN field value, if not nil if updatedInput != nil { req.Params = updatedInput } // update request for outpost access point endpoint err = updateRequestOutpostAccessPointEndpoint(req, tv) if err != nil { req.Error = err } case arn.OutpostBucketARN: // Add outpostID header req.HTTPRequest.Header.Add(outpostIDHeader, tv.OutpostID) // update arnable field to resource value updatedInput, err := endpoint.updateArnableField(tv.BucketName) if err != nil { req.Error = err return } // update request params to use modified ARN field value, if not nil if updatedInput != nil { req.Params = updatedInput } // update request for outpost bucket endpoint err = updateRequestOutpostBucketEndpoint(req, tv) if err != nil { req.Error = err } default: req.Error = s3shared.NewInvalidARNError(resource, nil) } } // updateRequestOutpostIDEndpoint is special customization to be applied for operations // CreateBucket, ListRegionalBuckets which must resolve endpoint to s3-outposts.{region}.amazonaws.com // with region as client region and signed by s3-control if an outpost id is provided. func updateRequestOutpostIDEndpoint(request *request.Request) { cfgRegion := aws.StringValue(request.Config.Region) if !hasCustomEndpoint(request) { serviceEndpointLabel := "s3-outposts." 
// request url request.HTTPRequest.URL.Host = serviceEndpointLabel + cfgRegion + ".amazonaws.com" // disable the host prefix for outpost access points request.Config.DisableEndpointHostPrefix = aws.Bool(true) } // signer redirection request.ClientInfo.SigningName = "s3-outposts" request.ClientInfo.SigningRegion = cfgRegion } func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { // validate Outpost endpoint if err := validateOutpostEndpoint(req, accessPoint); err != nil { return err } // disable the host prefix for outpost access points req.Config.DisableEndpointHostPrefix = aws.Bool(true) if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil { return err } return nil } func updateRequestOutpostBucketEndpoint(req *request.Request, bucketResource arn.OutpostBucketARN) error { // validate Outpost endpoint if err := validateOutpostEndpoint(req, bucketResource); err != nil { return err } // disable the host prefix for outpost bucket. req.Config.DisableEndpointHostPrefix = aws.Bool(true) if err := outpostBucketResourceEndpointBuilder(bucketResource).build(req); err != nil { return err } return nil } // validate request resource for retrieving endpoint func validateEndpointRequestResource(req *request.Request, resource arn.Resource) error { resReq := s3shared.ResourceRequest{Request: req, Resource: resource} if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() { return s3shared.NewClientPartitionMismatchError(resource, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { return s3shared.NewClientRegionMismatchError(resource, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } // Accelerate not supported if aws.BoolValue(req.Config.S3UseAccelerate) { return s3shared.NewClientConfiguredForAccelerateError(resource, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } return nil } // validations for fetching outpost endpoint func validateOutpostEndpoint(req *request.Request, resource arn.Resource) error { resReq := s3shared.ResourceRequest{ Request: req, Resource: resource, } if err := validateEndpointRequestResource(req, resource); err != nil { return err
// resource configured with FIPS as region is not supported by outposts if resReq.UseFIPS() { return s3shared.NewFIPSConfigurationError(resource, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } // DualStack not supported if aws.BoolValue(req.Config.UseDualStack) { return s3shared.NewClientConfiguredForDualStackError(resource, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } return nil }
}
options.rs
use std::{convert::TryFrom, fs, io::ErrorKind, path::PathBuf, str::FromStr}; use anyhow::{anyhow, Error, Result}; use lazy_static::lazy_static; use lychee_lib::{Base, Input}; use serde::Deserialize; use structopt::{clap::crate_version, StructOpt}; const METHOD: &str = "get"; const TIMEOUT: usize = 20; const MAX_CONCURRENCY: usize = 128; const MAX_REDIRECTS: usize = 10; const USER_AGENT: &str = concat!("lychee/", crate_version!()); // this exists because structopt requires `&str` type values for defaults // (we can't use e.g. `TIMEOUT` or `timeout()` which gets created for serde) lazy_static! { static ref TIMEOUT_STR: String = TIMEOUT.to_string(); static ref MAX_CONCURRENCY_STR: String = MAX_CONCURRENCY.to_string(); static ref MAX_REDIRECTS_STR: String = MAX_REDIRECTS.to_string(); } #[derive(Debug, Deserialize)] pub(crate) enum
{ String, Json, } impl FromStr for Format { type Err = Error; fn from_str(format: &str) -> Result<Self, Self::Err> { match format { "string" => Ok(Format::String), "json" => Ok(Format::Json), _ => Err(anyhow!("Could not parse format {}", format)), } } } impl Default for Format { fn default() -> Self { Format::String } } // Macro for generating default functions to be used by serde macro_rules! default_function { ( $( $name:ident : $T:ty = $e:expr; )* ) => { $( #[allow(clippy::missing_const_for_fn)] fn $name() -> $T { $e } )* }; } // Generate the functions for serde defaults default_function! { max_redirects: usize = MAX_REDIRECTS; max_concurrency: usize = MAX_CONCURRENCY; user_agent: String = USER_AGENT.to_string(); timeout: usize = TIMEOUT; method: String = METHOD.to_string(); } // Macro for merging configuration values macro_rules! fold_in { ( $cli:ident , $toml:ident ; $( $key:ident : $default:expr; )* ) => { $( if $cli.$key == $default && $toml.$key != $default { $cli.$key = $toml.$key; } )* }; } fn parse_base(src: &str) -> Result<Base, lychee_lib::ErrorKind> { Base::try_from(src) } #[derive(Debug, StructOpt)] #[structopt( name = "lychee", about = "A glorious link checker.\n\nProject home page: https://github.com/lycheeverse/lychee" )] pub(crate) struct LycheeOptions { /// The inputs (where to get links to check from). /// These can be: files (e.g. `README.md`), file globs (e.g. `"~/git/*/README.md"`), /// remote URLs (e.g. `https://example.org/README.md`) or standard input (`-`). /// NOTE: Use `--` to separate inputs from options that allow multiple arguments. #[structopt(name = "inputs", required = true)] raw_inputs: Vec<String>, /// Configuration file to use #[structopt(short, long = "config", default_value = "./lychee.toml")] pub(crate) config_file: String, #[structopt(flatten)] pub(crate) config: Config, } impl LycheeOptions { // This depends on config, which is why a method is required (we could // accept a `Vec<Input>` in `LycheeOptions` and do the conversion there, // but we'd get no access to `glob_ignore_case`. /// Get parsed inputs from options. pub(crate) fn inputs(&self) -> Vec<Input> { self.raw_inputs .iter() .map(|s| Input::new(s, self.config.glob_ignore_case)) .collect() } } #[allow(clippy::struct_excessive_bools)] #[derive(Debug, Deserialize, StructOpt)] pub(crate) struct Config { /// Verbose program output #[structopt(short, long)] #[serde(default)] pub(crate) verbose: bool, /// Do not show progress bar. /// This is recommended for non-interactive shells (e.g. for continuous integration) #[structopt(short, long, verbatim_doc_comment)] #[serde(default)] pub(crate) no_progress: bool, /// Don't perform any link checking. /// Instead, dump all the links extracted from inputs that would be checked #[structopt(long)] #[serde(default)] pub(crate) dump: bool, /// Maximum number of allowed redirects #[structopt(short, long, default_value = &MAX_REDIRECTS_STR)] #[serde(default = "max_redirects")] pub(crate) max_redirects: usize, /// Maximum number of concurrent network requests #[structopt(long, default_value = &MAX_CONCURRENCY_STR)] #[serde(default = "max_concurrency")] pub(crate) max_concurrency: usize, /// Number of threads to utilize. 
/// Defaults to number of cores available to the system #[structopt(short = "T", long)] #[serde(default)] pub(crate) threads: Option<usize>, /// User agent #[structopt(short, long, default_value = USER_AGENT)] #[serde(default = "user_agent")] pub(crate) user_agent: String, /// Proceed for server connections considered insecure (invalid TLS) #[structopt(short, long)] #[serde(default)] pub(crate) insecure: bool, /// Only test links with the given schemes (e.g. http and https) #[structopt(short, long)] #[serde(default)] pub(crate) scheme: Vec<String>, /// Only check local files and block network requests. #[structopt(long)] #[serde(default)] pub(crate) offline: bool, /// URLs to check (supports regex). Has preference over all excludes. #[structopt(long)] #[serde(default)] pub(crate) include: Vec<String>, /// Exclude URLs from checking (supports regex) #[structopt(long)] #[serde(default)] pub(crate) exclude: Vec<String>, /// A file or files that contains URLs to exclude from checking #[structopt(long)] #[serde(default)] pub(crate) exclude_file: Vec<String>, /// Exclude all private IPs from checking. /// Equivalent to `--exclude-private --exclude-link-local --exclude-loopback` #[structopt(short = "E", long, verbatim_doc_comment)] #[serde(default)] pub(crate) exclude_all_private: bool, /// Exclude private IP address ranges from checking #[structopt(long)] #[serde(default)] pub(crate) exclude_private: bool, /// Exclude link-local IP address range from checking #[structopt(long)] #[serde(default)] pub(crate) exclude_link_local: bool, /// Exclude loopback IP address range and localhost from checking #[structopt(long)] #[serde(default)] pub(crate) exclude_loopback: bool, /// Exclude all mail addresses from checking #[structopt(long)] #[serde(default)] pub(crate) exclude_mail: bool, /// Custom request headers #[structopt(short, long)] #[serde(default)] pub(crate) headers: Vec<String>, /// Comma-separated list of accepted status codes for valid links #[structopt(short, long)] #[serde(default)] pub(crate) accept: Option<String>, /// Website timeout from connect to response finished #[structopt(short, long, default_value = &TIMEOUT_STR)] #[serde(default = "timeout")] pub(crate) timeout: usize, /// Request method // Using `-X` as a short param similar to curl #[structopt(short = "X", long, default_value = METHOD)] #[serde(default = "method")] pub(crate) method: String, /// Base URL or website root directory to check relative URLs /// e.g. https://example.org or `/path/to/public` #[structopt(short, long, parse(try_from_str = parse_base))] #[serde(default)] pub(crate) base: Option<Base>, /// Basic authentication support. E.g. 
`username:password` #[structopt(long)] #[serde(default)] pub(crate) basic_auth: Option<String>, /// GitHub API token to use when checking github.com links, to avoid rate limiting #[structopt(long, env = "GITHUB_TOKEN")] #[serde(default)] pub(crate) github_token: Option<String>, /// Skip missing input files (default is to error if they don't exist) #[structopt(long)] #[serde(default)] pub(crate) skip_missing: bool, /// Ignore case when expanding filesystem path glob inputs #[structopt(long)] #[serde(default)] pub(crate) glob_ignore_case: bool, /// Output file of status report #[structopt(short, long, parse(from_os_str))] #[serde(default)] pub(crate) output: Option<PathBuf>, /// Output file format of status report (json, string) #[structopt(short, long, default_value = "string")] #[serde(default)] pub(crate) format: Format, /// When HTTPS is available, treat HTTP links as errors #[structopt(long)] #[serde(default)] pub(crate) require_https: bool, } impl Config { /// Load configuration from a file pub(crate) fn load_from_file(path: &str) -> Result<Option<Config>> { // Read configuration file let result = fs::read(path); // Ignore a file not found error let contents = match result { Ok(c) => c, Err(e) => { return match e.kind() { ErrorKind::NotFound => Ok(None), _ => Err(Error::from(e)), } } }; Ok(Some(toml::from_slice(&contents)?)) } /// Merge the configuration from TOML into the CLI configuration pub(crate) fn merge(&mut self, toml: Config) { fold_in! { // Destination and source configs self, toml; // Keys with defaults to assign verbose: false; no_progress: false; max_redirects: MAX_REDIRECTS; max_concurrency: MAX_CONCURRENCY; threads: None; user_agent: USER_AGENT; insecure: false; scheme: Vec::<String>::new(); include: Vec::<String>::new(); exclude: Vec::<String>::new(); exclude_file: Vec::<String>::new(); exclude_all_private: false; exclude_private: false; exclude_link_local: false; exclude_loopback: false; exclude_mail: false; headers: Vec::<String>::new(); accept: None; timeout: TIMEOUT; method: METHOD; base: None; basic_auth: None; github_token: None; skip_missing: false; glob_ignore_case: false; output: None; require_https: false; } } }
Format
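A sketch of how `load_from_file` and `merge` are meant to compose at startup (illustrative, and assumed to live in the same crate since both types are `pub(crate)`): CLI flags keep precedence, and file values only fill in settings left at their defaults, which is exactly the rule the `fold_in!` macro implements.

use anyhow::Result;
use structopt::StructOpt;

fn load_options() -> Result<LycheeOptions> {
    let mut opts = LycheeOptions::from_args();
    // File values only replace settings the CLI left at their defaults.
    if let Some(partial) = Config::load_from_file(&opts.config_file)? {
        opts.config.merge(partial);
    }
    Ok(opts)
}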
test_loss.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division from __future__ import print_function import unittest import os import six import numpy as np import shutil import copy import paddle from paddle import fluid from paddle.incubate.hapi.model import Model, Input from paddle.incubate.hapi.loss import CrossEntropy, SoftmaxWithCrossEntropy def stable_softmax(x): """Compute the softmax of vector x in a numerically stable way.""" # clip to shiftx, otherwise, when calc loss with # log(exp(shiftx)), may get log(0)=INF shiftx = (x - np.max(x)).clip(-64.) exps = np.exp(shiftx) return exps / np.sum(exps) def randomize_probability(batch_size, class_num, dtype='float32'): prob = np.random.uniform( 0.1, 1.0, size=(batch_size, class_num)).astype(dtype) prob_sum = prob.sum(axis=1) for i in six.moves.xrange(len(prob)): prob[i] /= prob_sum[i] return prob def numpy_ce(x, label): return np.asmatrix( [[-np.log(x[i][label[i][0]])] for i in range(x.shape[0])], dtype="float32").mean() class TestLoss(unittest.TestCase): def
(self):
        class_num = 100
        batch_size = 128

        inputs = [randomize_probability(batch_size, class_num) for _ in range(2)]
        labels = [
            np.random.randint(
                0, class_num, (batch_size, 1), dtype="int64") for _ in range(2)
        ]

        gt_out = [numpy_ce(inputs[i], labels[i]) for i in range(2)]

        fluid.enable_dygraph()
        cross_entropy = CrossEntropy()
        out = cross_entropy(
            [fluid.dygraph.to_variable(x) for x in inputs],
            [fluid.dygraph.to_variable(label) for label in labels])
        out = [o.numpy() for o in out]

        for o, g in zip(out, gt_out):
            np.testing.assert_allclose(o, g, atol=1e-5)

    def test_soft_cross_entropy(self):
        class_num = 100
        batch_size = 128

        inputs = [randomize_probability(batch_size, class_num) for _ in range(2)]
        labels = [
            np.random.randint(
                0, class_num, (batch_size, 1), dtype="int64") for _ in range(2)
        ]

        fluid.enable_dygraph()
        softmax_cross_entropy = SoftmaxWithCrossEntropy()
        softmax_cross_entropy(
            [fluid.dygraph.to_variable(x) for x in inputs],
            [fluid.dygraph.to_variable(label) for label in labels])

        softmax_cross_entropy = SoftmaxWithCrossEntropy(average=False)

        inputs = [randomize_probability(batch_size, class_num)]
        labels = [
            np.random.randint(
                0, class_num, (batch_size, 1), dtype="int64")
        ]

        softmax_cross_entropy([fluid.dygraph.to_variable(x) for x in inputs],
                              fluid.dygraph.to_variable(labels[0]))


if __name__ == '__main__':
    unittest.main()
test_cross_entropy
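A quick standalone check of the shift trick used by `stable_softmax` above (plain NumPy, independent of Paddle): subtracting the maximum leaves the softmax unchanged while keeping `exp` from overflowing.

import numpy as np

x = np.array([1000.0, 1001.0, 1002.0])
# Naive softmax overflows: exp(1000) == inf, so the result is all NaN.
naive = np.exp(x) / np.sum(np.exp(x))
# Shifted softmax is mathematically identical but stays in range:
# softmax(x) == softmax(x - max(x)).
shifted = np.exp(x - x.max()) / np.sum(np.exp(x - x.max()))
print(naive)    # [nan nan nan]
print(shifted)  # ~[0.090 0.245 0.665]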
utils.go
package pow

import (
	"encoding/binary"
	"encoding/json"
	"math"
	"math/big"
	"time"

	"tinychain/common"
	"tinychain/core/types"
)

func computeHash(nonce uint64, header *types.Header) ([]byte, error) {
	// The slice must be allocated up front: PutUint64 panics on a nil slice.
	nonceBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(nonceBytes, nonce)
	hash := header.HashNoConsensus().Bytes()
	hash = append(hash, nonceBytes...)
	return common.Sha256(hash).Bytes(), nil
}

// computeNewDiff computes the new difficulty from the old difficulty and the
// time elapsed between the two blocks.
func computeNewDiff(currDiff uint64, curr *types.Block, old *types.Block) uint64 {
	duration := time.Duration(new(big.Int).Sub(curr.Time(), old.Time()).Int64())
	week := 7 * 24 * time.Hour
	if duration < week/4 {
		// clamp durations shorter than a quarter week to a quarter week
		duration = week / 4
	} else if duration > week*4 {
		// clamp durations longer than four weeks to four weeks
		duration = week * 4
	}
	oldTarget := computeTarget(currDiff)
	total := new(big.Int).Mul(oldTarget, new(big.Int).SetInt64(int64(duration)))
	newTarget := total.Div(total, new(big.Int).SetInt64(int64(week)))
	maxTarget := new(big.Int).SetUint64(math.MaxUint64)
	// the target can never exceed the maximum target
	if newTarget.Cmp(maxTarget) == 1 {
		newTarget = maxTarget
	}
	return maxTarget.Div(maxTarget, newTarget).Uint64()
}

func computeTarget(difficulty uint64) *big.Int {
func decodeConsensusInfo(d []byte) (*consensusInfo, error) { ci := &consensusInfo{} err := json.Unmarshal(d, ci) if err != nil { return nil, err } return ci, nil }
maxTarget := new(big.Int).SetUint64(math.MaxUint64) return maxTarget.Div(maxTarget, new(big.Int).SetUint64(difficulty)) }
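The target/difficulty relation used above is simply `target = MaxUint64 / difficulty`, so doubling the difficulty halves the acceptable hash space. A standalone worked check:

package main

import (
	"fmt"
	"math"
	"math/big"
)

// Same arithmetic as computeTarget: target = MaxUint64 / difficulty.
func target(difficulty uint64) *big.Int {
	max := new(big.Int).SetUint64(math.MaxUint64)
	return max.Div(max, new(big.Int).SetUint64(difficulty))
}

func main() {
	fmt.Println(target(1)) // 18446744073709551615 (every hash passes)
	fmt.Println(target(2)) // 9223372036854775807  (half the hash space)
}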
payload.go
package payload import ( "github.com/iotaledger/hive.go/marshalutil" ) const ( // ObjectName defines the name of the data object. ObjectName = "data" ) func init() { // register the generic unmarshaler SetGenericUnmarshalerFactory(GenericPayloadUnmarshalerFactory) // register the generic data payload type RegisterType(DataType, ObjectName, GenericPayloadUnmarshalerFactory(DataType)) } // Payload represents some kind of payload of data which only gains meaning by having // corresponding node logic processing payloads of a given type. type Payload interface { // Type returns the type of the payload. Type() Type // Bytes returns the payload bytes. Bytes() []byte // Unmarshal unmarshals the payload from the given bytes. Unmarshal(bytes []byte) error // String returns a human-friendly representation of the payload. String() string } // FromBytes unmarshals bytes into a payload. func
(bytes []byte) (result Payload, consumedBytes int, err error) { // initialize helper marshalUtil := marshalutil.New(bytes) // calculate result payloadType, err := marshalUtil.ReadUint32() if err != nil { return } payloadSize, err := marshalUtil.ReadUint32() if err != nil { return } marshalUtil.ReadSeek(marshalUtil.ReadOffset() - marshalutil.UINT32_SIZE*2) payloadBytes, err := marshalUtil.ReadBytes(int(payloadSize) + 8) if err != nil { return } readOffset := marshalUtil.ReadOffset() result, err = GetUnmarshaler(payloadType)(payloadBytes) if err != nil { // fallback to the generic unmarshaler if registered type fails to unmarshal marshalUtil.ReadSeek(readOffset) result, err = GenericPayloadUnmarshalerFactory(payloadType)(payloadBytes) if err != nil { return } } // return the number of bytes we processed consumedBytes = marshalUtil.ReadOffset() return } // Parse parses a payload by using the given marshal util. func Parse(marshalUtil *marshalutil.MarshalUtil) (Payload, error) { if payload, err := marshalUtil.Parse(func(data []byte) (interface{}, int, error) { return FromBytes(data) }); err != nil { return nil, err } else { return payload.(Payload), nil } }
FromBytes
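`FromBytes` above expects the buffer to begin with the layout `[payload type : uint32][payload size : uint32][payload data ...]`, then hands the registered unmarshaler the whole frame including the 8-byte header. A sketch of building such a frame with the same marshalutil package, written as if inside this package; the zero-argument `New()` constructor and the `uint32(DataType)` conversion are assumptions about the surrounding APIs:

// Sketch: produce a frame in the layout FromBytes expects, then parse it back.
func exampleRoundTrip(data []byte) {
	buf := marshalutil.New()           // assumed: zero-arg New() yields an empty buffer
	buf.WriteUint32(uint32(DataType))  // payload type (assumed convertible to uint32)
	buf.WriteUint32(uint32(len(data))) // payload size
	buf.WriteBytes(data)               // payload data

	if p, consumed, err := FromBytes(buf.Bytes()); err == nil {
		_ = consumed // total bytes read, including the 8-byte header
		_ = p        // concrete payload produced by the registered unmarshaler
	}
}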
winx64.rs
//! Unwind information for Windows x64 ABI. use crate::ir::{Function, InstructionData, Opcode, ValueLoc}; use crate::isa::x86::registers::{FPR, GPR, RU}; use crate::isa::{ unwind::winx64::{UnwindCode, UnwindInfo}, CallConv, RegUnit, TargetIsa, }; use crate::result::{CodegenError, CodegenResult}; use alloc::vec::Vec; use log::warn; pub(crate) fn create_unwind_info( func: &Function, isa: &dyn TargetIsa, ) -> CodegenResult<Option<UnwindInfo>> { // Only Windows fastcall is supported for unwind information if func.signature.call_conv != CallConv::WindowsFastcall || func.prologue_end.is_none() { return Ok(None); } let prologue_end = func.prologue_end.unwrap(); let entry_block = func.layout.entry_block().expect("missing entry block"); // Stores the stack size when SP is not adjusted via an immediate value let mut stack_size = None; let mut prologue_size = 0; let mut unwind_codes = Vec::new(); let mut found_end = false; for (offset, inst, size) in func.inst_offsets(entry_block, &isa.encoding_info()) { // x64 ABI prologues cannot exceed 255 bytes in length if (offset + size) > 255 { warn!("function prologues cannot exceed 255 bytes in size for Windows x64"); return Err(CodegenError::CodeTooLarge); } prologue_size += size; let unwind_offset = (offset + size) as u8; match func.dfg[inst] { InstructionData::Unary { opcode, arg } => { match opcode { Opcode::X86Push => { unwind_codes.push(UnwindCode::PushRegister { offset: unwind_offset, reg: GPR.index_of(func.locations[arg].unwrap_reg()) as u8, }); } Opcode::AdjustSpDown => { let stack_size = stack_size.expect("expected a previous stack size instruction"); // This is used when calling a stack check function // We need to track the assignment to RAX which has the size of the stack unwind_codes.push(UnwindCode::StackAlloc { offset: unwind_offset, size: stack_size, }); } _ => {} } } InstructionData::UnaryImm { opcode, imm } => { match opcode { Opcode::Iconst => { let imm: i64 = imm.into(); assert!(imm <= core::u32::MAX as i64); assert!(stack_size.is_none()); // This instruction should only appear in a prologue to pass an // argument of the stack size to a stack check function. // Record the stack size so we know what it is when we encounter the adjustment // instruction (which will adjust via the register assigned to this instruction). stack_size = Some(imm as u32); } Opcode::AdjustSpDownImm => { let imm: i64 = imm.into(); assert!(imm <= core::u32::MAX as i64); stack_size = Some(imm as u32); unwind_codes.push(UnwindCode::StackAlloc { offset: unwind_offset, size: imm as u32, }); } _ => {} } } InstructionData::Store { opcode: Opcode::Store, args: [arg1, arg2], offset, .. 
} => { if let (ValueLoc::Reg(src), ValueLoc::Reg(dst)) = (func.locations[arg1], func.locations[arg2]) { // If this is a save of an FPR, record an unwind operation // Note: the stack_offset here is relative to an adjusted SP if dst == (RU::rsp as RegUnit) && FPR.contains(src) { let offset: i32 = offset.into(); unwind_codes.push(UnwindCode::SaveXmm { offset: unwind_offset, reg: src as u8, stack_offset: offset as u32, }); } } } _ => {} }; if inst == prologue_end { found_end = true; break; } } assert!(found_end); Ok(Some(UnwindInfo { flags: 0, // this assumes cranelift functions have no SEH handlers prologue_size: prologue_size as u8, frame_register: None, frame_register_offset: 0, unwind_codes, })) } #[cfg(test)] mod tests { use super::*; use crate::cursor::{Cursor, FuncCursor}; use crate::ir::{ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind}; use crate::isa::{lookup, CallConv}; use crate::settings::{builder, Flags}; use crate::Context; use std::str::FromStr; use target_lexicon::triple; #[test] fn
() { let isa = lookup(triple!("x86_64")) .expect("expect x86 ISA") .finish(Flags::new(builder())); let mut context = Context::for_function(create_function(CallConv::SystemV, None)); context.compile(&*isa).expect("expected compilation"); assert_eq!( create_unwind_info(&context.func, &*isa).expect("can create unwind info"), None ); } #[test] fn test_small_alloc() { let isa = lookup(triple!("x86_64")) .expect("expect x86 ISA") .finish(Flags::new(builder())); let mut context = Context::for_function(create_function( CallConv::WindowsFastcall, Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)), )); context.compile(&*isa).expect("expected compilation"); let unwind = create_unwind_info(&context.func, &*isa) .expect("can create unwind info") .expect("expected unwind info"); assert_eq!( unwind, UnwindInfo { flags: 0, prologue_size: 9, frame_register: None, frame_register_offset: 0, unwind_codes: vec![ UnwindCode::PushRegister { offset: 2, reg: GPR.index_of(RU::rbp.into()) as u8 }, UnwindCode::StackAlloc { offset: 9, size: 64 } ] } ); assert_eq!(unwind.emit_size(), 8); let mut buf = [0u8; 8]; unwind.emit(&mut buf); assert_eq!( buf, [ 0x01, // Version and flags (version 1, no flags) 0x09, // Prologue size 0x02, // Unwind code count (1 for stack alloc, 1 for push reg) 0x00, // Frame register + offset (no frame register) 0x09, // Prolog offset 0x72, // Operation 2 (small stack alloc), size = 0xB slots (e.g. (0x7 * 8) + 8 = 64 bytes) 0x02, // Prolog offset 0x50, // Operation 0 (save nonvolatile register), reg = 5 (RBP) ] ); } #[test] fn test_medium_alloc() { let isa = lookup(triple!("x86_64")) .expect("expect x86 ISA") .finish(Flags::new(builder())); let mut context = Context::for_function(create_function( CallConv::WindowsFastcall, Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 10000)), )); context.compile(&*isa).expect("expected compilation"); let unwind = create_unwind_info(&context.func, &*isa) .expect("can create unwind info") .expect("expected unwind info"); assert_eq!( unwind, UnwindInfo { flags: 0, prologue_size: 27, frame_register: None, frame_register_offset: 0, unwind_codes: vec![ UnwindCode::PushRegister { offset: 2, reg: GPR.index_of(RU::rbp.into()) as u8 }, UnwindCode::StackAlloc { offset: 27, size: 10000 } ] } ); assert_eq!(unwind.emit_size(), 12); let mut buf = [0u8; 12]; unwind.emit(&mut buf); assert_eq!( buf, [ 0x01, // Version and flags (version 1, no flags) 0x1B, // Prologue size 0x03, // Unwind code count (2 for stack alloc, 1 for push reg) 0x00, // Frame register + offset (no frame register) 0x1B, // Prolog offset 0x01, // Operation 1 (large stack alloc), size is scaled 16-bits (info = 0) 0xE2, // Low size byte 0x04, // High size byte (e.g. 
0x04E2 * 8 = 10000 bytes) 0x02, // Prolog offset 0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP) 0x00, // Padding 0x00, // Padding ] ); } #[test] fn test_large_alloc() { let isa = lookup(triple!("x86_64")) .expect("expect x86 ISA") .finish(Flags::new(builder())); let mut context = Context::for_function(create_function( CallConv::WindowsFastcall, Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 1000000)), )); context.compile(&*isa).expect("expected compilation"); let unwind = create_unwind_info(&context.func, &*isa) .expect("can create unwind info") .expect("expected unwind info"); assert_eq!( unwind, UnwindInfo { flags: 0, prologue_size: 27, frame_register: None, frame_register_offset: 0, unwind_codes: vec![ UnwindCode::PushRegister { offset: 2, reg: GPR.index_of(RU::rbp.into()) as u8 }, UnwindCode::StackAlloc { offset: 27, size: 1000000 } ] } ); assert_eq!(unwind.emit_size(), 12); let mut buf = [0u8; 12]; unwind.emit(&mut buf); assert_eq!( buf, [ 0x01, // Version and flags (version 1, no flags) 0x1B, // Prologue size 0x04, // Unwind code count (3 for stack alloc, 1 for push reg) 0x00, // Frame register + offset (no frame register) 0x1B, // Prolog offset 0x11, // Operation 1 (large stack alloc), size is unscaled 32-bits (info = 1) 0x40, // Byte 1 of size 0x42, // Byte 2 of size 0x0F, // Byte 3 of size 0x00, // Byte 4 of size (size is 0xF4240 = 1000000 bytes) 0x02, // Prolog offset 0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP) ] ); } fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function { let mut func = Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv)); let block0 = func.dfg.make_block(); let mut pos = FuncCursor::new(&mut func); pos.insert_block(block0); pos.ins().return_(&[]); if let Some(stack_slot) = stack_slot { func.stack_slots.push(stack_slot); } func } }
test_wrong_calling_convention
client.go
package main

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"flag"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"time"

	"github.com/myzhan/boomer"
)

// Config and urls in json file.
// Run in task set.

var client *http.Client
var targetFile string

type Target struct {
	Method      string `json:"method"`
	Url         string `json:"url"`
	PostFile    string `json:"postFile"`
	ContentType string `json:"contentType"`
	Verbose     bool   `json:"verbose"`
	Weight      int    `json:"weight"`
	Name        string `json:"name"`
}

type TargetF struct {
	Config  TargetConfig `json:"config"`
	Targets []Target     `json:"targets"`
}

type TargetConfig struct {
	Timeout            int  `json:"timeout"`
	DisableCompression bool `json:"disableCompression"`
	DisableKeepalive   bool `json:"disableKeepalive"`
}

func (t *Target) worker() {
	// Keep the request body local: workers run concurrently, so sharing it
	// through package-level variables would be a data race.
	var post []byte
	if t.Method == "GET" || t.Method == "DELETE" || t.PostFile == "" {
		post = nil
	} else {
		var err error
		post, err = ioutil.ReadFile(t.PostFile)
		if err != nil {
			log.Fatalf("ERROR: load post file error: %s", err)
		}
	}
	request, err := http.NewRequest(t.Method, t.Url, bytes.NewBuffer(post))
	if err != nil {
		log.Fatalf("%v\n", err)
	}
	request.Header.Set("Content-Type", t.ContentType)

	startTime := time.Now()
	response, err := client.Do(request)
	elapsed := time.Since(startTime)

	if err != nil {
		if t.Verbose {
			log.Printf("%v\n", err)
		}
		boomer.RecordFailure(t.Method, t.Url, 0.0, err.Error())
	} else {
		boomer.RecordSuccess(t.Method, t.Url, elapsed.Nanoseconds()/int64(time.Millisecond), response.ContentLength)
		if t.Verbose {
			body, err := ioutil.ReadAll(response.Body)
			if err != nil {
				log.Printf("%v\n", err)
			} else {
				log.Printf("Status Code: %d\n", response.StatusCode)
				log.Println(string(body))
			}
		} else {
			io.Copy(ioutil.Discard, response.Body)
		}
		response.Body.Close()
	}
}

func main()
{ flag.StringVar(&targetFile, "f", "", "target file in json") flag.Parse() if targetFile == "" { log.Fatalln("-f can't be empty string, please specify a json file that you want to test.") } targetDate, err := ioutil.ReadFile(targetFile) if err != nil { log.Fatalf("%v\n", err) } var t TargetF errs := json.Unmarshal(targetDate, &t) if errs != nil { log.Fatalln("===Error:", errs) } config := t.Config timeout := config.Timeout disableCompression := config.DisableCompression disableKeepalive := config.DisableKeepalive log.Printf(`HTTP benchmark Config: timeout: %d disable-compression: %t disable-keepalive: %t`, timeout, disableCompression, disableKeepalive) http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 2000 tr := &http.Transport{ TLSClientConfig: &tls.Config{ InsecureSkipVerify: true, }, MaxIdleConnsPerHost: 2000, DisableCompression: disableCompression, DisableKeepAlives: disableKeepalive, } client = &http.Client{ Transport: tr, Timeout: time.Duration(timeout) * time.Second, } //tasks := make([]*boomer.Task, 0) ts := boomer.NewWeighingTaskSet() targets := t.Targets for num, _t := range targets { method := _t.Method url := _t.Url contentType := _t.ContentType verbose := _t.Verbose weight := _t.Weight name := _t.Name postFile := _t.PostFile log.Printf(`HTTP benchmark Target-%d: method: %s url: %s content-type: %s verbose: %t`, num, method, url, contentType, verbose) _target := Target{ Method: method, Url: url, PostFile: postFile, ContentType: contentType, Verbose: verbose, Weight: weight, Name: name, } _task := &boomer.Task{ Name: _target.Name, Weight: _target.Weight, Fn: _target.worker, } //tasks = append(tasks, _task) ts.AddTask(_task) } tasks := &boomer.Task{ Name: "TaskSet", Fn: ts.Run, } boomer.Run(tasks) }
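For reference, a target file matching the structs above could look like this (key names follow the `json` tags; the values are illustrative):

{
    "config": {
        "timeout": 10,
        "disableCompression": false,
        "disableKeepalive": false
    },
    "targets": [
        {
            "method": "POST",
            "url": "http://localhost:8080/api",
            "postFile": "body.json",
            "contentType": "application/json",
            "verbose": false,
            "weight": 1,
            "name": "post-api"
        }
    ]
}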
tanh Compute hyperbolic tangent element-wise.py
np.tanh(x)
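A minimal sketch of the element-wise behavior, assuming numpy is imported as np:

import numpy as np

x = np.array([-2.0, 0.0, 2.0])
print(np.tanh(x))  # [-0.96402758  0.          0.96402758], tanh applied to each element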
test_httpclient.py
try: import unittest2 as unittest except ImportError: import unittest import sys sys.path.append('..') from pyrabbit import http class TestHTTPClient(unittest.TestCase): """ Except for the init test, these are largely functional tests that require a RabbitMQ management API to be available on localhost """ testhost = 'localhost:15672' testuser = 'guest' testpass = 'guest' def
(self): self.c = http.HTTPClient(self.testhost, self.testuser, self.testpass) def test_client_init(self): c = http.HTTPClient(self.testhost, self.testuser, self.testpass) self.assertIsInstance(c, http.HTTPClient) def test_client_init_sets_credentials(self): self.assertEqual(self.c.auth.username, self.testuser) self.assertEqual(self.c.auth.password, self.testpass) def test_client_init_sets_default_timeout(self): self.assertEqual(self.c.timeout, 5) def test_client_init_with_timeout(self): c = http.HTTPClient(self.testhost, self.testuser, self.testpass, 1) self.assertEqual(c.timeout, 1)
setUp
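As the class docstring says, these are largely functional tests against a live RabbitMQ management API; a plausible way to run them, assuming a local broker with the default guest/guest credentials:

rabbitmq-plugins enable rabbitmq_management   # exposes the API on localhost:15672
python -m unittest test_httpclient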
licenses.py
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility for checking and processing licensing information in third_party directories. Usage: licenses.py <command> Commands: scan scan third_party directories, verifying that we have licensing info credits generate about:credits on stdout (You can also import this as a module.) """ import argparse import cgi import os import sys # Paths from the root of the tree to directories to skip. PRUNE_PATHS = set([ # Same module occurs in crypto/third_party/nss and net/third_party/nss, so # skip this one. os.path.join('third_party','nss'), # Placeholder directory only, not third-party code. os.path.join('third_party','adobe'), # Apache 2.0 license. See crbug.com/140478 os.path.join('third_party','bidichecker'), # Build files only, not third-party code. os.path.join('third_party','widevine'), # Only binaries, used during development. os.path.join('third_party','valgrind'), # Used for development and test, not in the shipping product. os.path.join('build','secondary'), os.path.join('third_party','bison'), os.path.join('third_party','blanketjs'), os.path.join('third_party','cygwin'), os.path.join('third_party','gles2_conform'), os.path.join('third_party','gnu_binutils'), os.path.join('third_party','gold'), os.path.join('third_party','gperf'), os.path.join('third_party','lighttpd'), os.path.join('third_party','llvm'), os.path.join('third_party','llvm-build'), os.path.join('third_party','mingw-w64'), os.path.join('third_party','nacl_sdk_binaries'), os.path.join('third_party','pefile'), os.path.join('third_party','perl'), os.path.join('third_party','psyco_win32'), os.path.join('third_party','pylib'), os.path.join('third_party','pywebsocket'), os.path.join('third_party','qunit'), os.path.join('third_party','sinonjs'), os.path.join('third_party','syzygy'), os.path.join('tools', 'profile_chrome', 'third_party'), # Chromium code in third_party. os.path.join('third_party','fuzzymatch'), os.path.join('tools', 'swarming_client'), # Stuff pulled in from chrome-internal for official builds/tools. os.path.join('third_party', 'clear_cache'), os.path.join('third_party', 'gnu'), os.path.join('third_party', 'googlemac'), os.path.join('third_party', 'pcre'), os.path.join('third_party', 'psutils'), os.path.join('third_party', 'sawbuck'), # See crbug.com/350472 os.path.join('chrome', 'browser', 'resources', 'chromeos', 'quickoffice'), # Chrome for Android proprietary code. os.path.join('clank'), # Redistribution does not require attribution in documentation. os.path.join('third_party','directxsdk'), os.path.join('third_party','platformsdk_win2008_6_1'), os.path.join('third_party','platformsdk_win7'), # For testing only, present on some bots. os.path.join('isolate_deps_dir'), ]) # Directories we don't scan through. 
VCS_METADATA_DIRS = ('.svn', '.git') PRUNE_DIRS = (VCS_METADATA_DIRS + ('out', 'Debug', 'Release', # build files 'layout_tests')) # lots of subdirs ADDITIONAL_PATHS = ( os.path.join('breakpad'), os.path.join('chrome', 'common', 'extensions', 'docs', 'examples'), os.path.join('chrome', 'test', 'chromeos', 'autotest'), os.path.join('chrome', 'test', 'data'), os.path.join('native_client'), os.path.join('net', 'tools', 'spdyshark'), os.path.join('sdch', 'open-vcdiff'), os.path.join('testing', 'gmock'), os.path.join('testing', 'gtest'), os.path.join('tools', 'grit'), os.path.join('tools', 'gyp'), os.path.join('tools', 'page_cycler', 'acid3'), os.path.join('url', 'third_party', 'mozilla'), os.path.join('v8'), # Fake directories to include the strongtalk and fdlibm licenses. os.path.join('v8', 'strongtalk'), os.path.join('v8', 'fdlibm'), ) # Directories where we check out directly from upstream, and therefore # can't provide a README.chromium. Please prefer a README.chromium # wherever possible. SPECIAL_CASES = { os.path.join('native_client'): { "Name": "native client", "URL": "http://code.google.com/p/nativeclient", "License": "BSD", }, os.path.join('sdch', 'open-vcdiff'): { "Name": "open-vcdiff", "URL": "https://github.com/google/open-vcdiff", "License": "Apache 2.0, MIT, GPL v2 and custom licenses", "License Android Compatible": "yes", }, os.path.join('testing', 'gmock'): { "Name": "gmock", "URL": "http://code.google.com/p/googlemock", "License": "BSD", "License File": "NOT_SHIPPED", }, os.path.join('testing', 'gtest'): { "Name": "gtest", "URL": "http://code.google.com/p/googletest", "License": "BSD", "License File": "NOT_SHIPPED", }, os.path.join('third_party', 'angle'): { "Name": "Almost Native Graphics Layer Engine", "URL": "http://code.google.com/p/angleproject/", "License": "BSD", }, os.path.join('third_party', 'cros_system_api'): { "Name": "Chromium OS system API", "URL": "http://www.chromium.org/chromium-os", "License": "BSD", # Absolute path here is resolved as relative to the source root. "License File": "/LICENSE.chromium_os", }, os.path.join('third_party', 'lss'): { "Name": "linux-syscall-support", "URL": "http://code.google.com/p/linux-syscall-support/", "License": "BSD", "License File": "/LICENSE", }, os.path.join('third_party', 'ots'): { "Name": "OTS (OpenType Sanitizer)", "URL": "http://code.google.com/p/ots/", "License": "BSD", }, os.path.join('third_party', 'pdfium'): { "Name": "PDFium", "URL": "http://code.google.com/p/pdfium/", "License": "BSD", }, os.path.join('third_party', 'pdfsqueeze'): { "Name": "pdfsqueeze", "URL": "http://code.google.com/p/pdfsqueeze/", "License": "Apache 2.0", "License File": "COPYING", }, os.path.join('third_party', 'ppapi'): { "Name": "ppapi", "URL": "http://code.google.com/p/ppapi/", }, os.path.join('third_party', 'scons-2.0.1'): { "Name": "scons-2.0.1", "URL": "http://www.scons.org", "License": "MIT", "License File": "NOT_SHIPPED", }, os.path.join('third_party', 'catapult'): { "Name": "catapult", "URL": "https://github.com/catapult-project/catapult", "License": "BSD", "License File": "NOT_SHIPPED", }, os.path.join('third_party', 'v8-i18n'): { "Name": "Internationalization Library for v8", "URL": "http://code.google.com/p/v8-i18n/", "License": "Apache 2.0", }, os.path.join('third_party', 'WebKit'): { "Name": "WebKit", "URL": "http://webkit.org/", "License": "BSD and GPL v2", # Absolute path here is resolved as relative to the source root. 
"License File": "/third_party/WebKit/LICENSE_FOR_ABOUT_CREDITS", }, os.path.join('third_party', 'webpagereplay'): { "Name": "webpagereplay", "URL": "http://code.google.com/p/web-page-replay", "License": "Apache 2.0", "License File": "NOT_SHIPPED", }, os.path.join('tools', 'grit'): { "Name": "grit", "URL": "http://code.google.com/p/grit-i18n", "License": "BSD", "License File": "NOT_SHIPPED", }, os.path.join('tools', 'gyp'): { "Name": "gyp", "URL": "http://code.google.com/p/gyp", "License": "BSD", "License File": "NOT_SHIPPED", }, os.path.join('v8'): { "Name": "V8 JavaScript Engine", "URL": "http://code.google.com/p/v8", "License": "BSD", }, os.path.join('v8', 'strongtalk'): { "Name": "Strongtalk", "URL": "http://www.strongtalk.org/", "License": "BSD", # Absolute path here is resolved as relative to the source root. "License File": "/v8/LICENSE.strongtalk", }, os.path.join('v8', 'fdlibm'): { "Name": "fdlibm", "URL": "http://www.netlib.org/fdlibm/", "License": "Freely Distributable", # Absolute path here is resolved as relative to the source root. "License File" : "/v8/src/third_party/fdlibm/LICENSE", "License Android Compatible" : "yes", }, os.path.join('third_party', 'khronos_glcts'): { # These sources are not shipped, are not public, and it isn't # clear why they're tripping the license check. "Name": "khronos_glcts", "URL": "http://no-public-url", "License": "Khronos", "License File": "NOT_SHIPPED", }, os.path.join('tools', 'telemetry', 'third_party', 'gsutil'): { "Name": "gsutil", "URL": "https://cloud.google.com/storage/docs/gsutil", "License": "Apache 2.0", "License File": "NOT_SHIPPED", }, } # Special value for 'License File' field used to indicate that the license file # should not be used in about:credits. NOT_SHIPPED = "NOT_SHIPPED" class LicenseError(Exception):
def AbsolutePath(path, filename, root): """Convert a path in README.chromium to be absolute based on the source root.""" if filename.startswith('/'): # Absolute-looking paths are relative to the source root # (which is the directory we're run from). absolute_path = os.path.join(root, filename[1:]) else: absolute_path = os.path.join(root, path, filename) if os.path.exists(absolute_path): return absolute_path return None def ParseDir(path, root, require_license_file=True, optional_keys=None): """Examine a third_party/foo component and extract its metadata.""" # Parse metadata fields out of README.chromium. # We examine "LICENSE" for the license file by default. metadata = { "License File": "LICENSE", # Relative path to license text. "Name": None, # Short name (for header on about:credits). "URL": None, # Project home page. "License": None, # Software license. } if optional_keys is None: optional_keys = [] if path in SPECIAL_CASES: metadata.update(SPECIAL_CASES[path]) else: # Try to find README.chromium. readme_path = os.path.join(root, path, 'README.chromium') if not os.path.exists(readme_path): raise LicenseError("missing README.chromium or licenses.py " "SPECIAL_CASES entry") for line in open(readme_path): line = line.strip() if not line: break for key in metadata.keys() + optional_keys: field = key + ": " if line.startswith(field): metadata[key] = line[len(field):] # Check that all expected metadata is present. for key, value in metadata.iteritems(): if not value: raise LicenseError("couldn't find '" + key + "' line " "in README.chromium or licences.py " "SPECIAL_CASES") # Special-case modules that aren't in the shipping product, so don't need # their license in about:credits. if metadata["License File"] != NOT_SHIPPED: # Check that the license file exists. for filename in (metadata["License File"], "COPYING"): license_path = AbsolutePath(path, filename, root) if license_path is not None: break if require_license_file and not license_path: raise LicenseError("License file not found. " "Either add a file named LICENSE, " "import upstream's COPYING if available, " "or add a 'License File:' line to " "README.chromium with the appropriate path.") metadata["License File"] = license_path return metadata def ContainsFiles(path, root): """Determines whether any files exist in a directory or in any of its subdirectories.""" for _, dirs, files in os.walk(os.path.join(root, path)): if files: return True for vcs_metadata in VCS_METADATA_DIRS: if vcs_metadata in dirs: dirs.remove(vcs_metadata) return False def FilterDirsWithFiles(dirs_list, root): # If a directory contains no files, assume it's a DEPS directory for a # project not used by our current configuration and skip it. return [x for x in dirs_list if ContainsFiles(x, root)] def FindThirdPartyDirs(prune_paths, root): """Find all third_party directories underneath the source root.""" third_party_dirs = set() for path, dirs, files in os.walk(root): path = path[len(root)+1:] # Pretty up the path. if path in prune_paths: dirs[:] = [] continue # Prune out directories we want to skip. # (Note that we loop over PRUNE_DIRS so we're not iterating over a # list that we're simultaneously mutating.) for skip in PRUNE_DIRS: if skip in dirs: dirs.remove(skip) if os.path.basename(path) == 'third_party': # Add all subdirectories that are not marked for skipping. for dir in dirs: dirpath = os.path.join(path, dir) if dirpath not in prune_paths: third_party_dirs.add(dirpath) # Don't recurse into any subdirs from here. 
dirs[:] = [] continue # Don't recurse into paths in ADDITIONAL_PATHS, like we do with regular # third_party/foo paths. if path in ADDITIONAL_PATHS: dirs[:] = [] for dir in ADDITIONAL_PATHS: if dir not in prune_paths: third_party_dirs.add(dir) return third_party_dirs def FindThirdPartyDirsWithFiles(root): third_party_dirs = FindThirdPartyDirs(PRUNE_PATHS, root) return FilterDirsWithFiles(third_party_dirs, root) def ScanThirdPartyDirs(root=None): """Scan a list of directories and report on any problems we find.""" if root is None: root = os.getcwd() third_party_dirs = FindThirdPartyDirsWithFiles(root) errors = [] for path in sorted(third_party_dirs): try: metadata = ParseDir(path, root) except LicenseError, e: errors.append((path, e.args[0])) continue for path, error in sorted(errors): print path + ": " + error return len(errors) == 0 def GenerateCredits(file_template_file, entry_template_file, output_file): """Generate about:credits.""" def EvaluateTemplate(template, env, escape=True): """Expand a template with variables like {{foo}} using a dictionary of expansions.""" for key, val in env.items(): if escape: val = cgi.escape(val) template = template.replace('{{%s}}' % key, val) return template root = os.path.join(os.path.dirname(__file__), '..') third_party_dirs = FindThirdPartyDirs(PRUNE_PATHS, root) if not file_template_file: file_template_file = os.path.join(root, 'components', 'about_ui', 'resources', 'about_credits.tmpl') if not entry_template_file: entry_template_file = os.path.join(root, 'components', 'about_ui', 'resources', 'about_credits_entry.tmpl') entry_template = open(entry_template_file).read() entries = [] for path in third_party_dirs: try: metadata = ParseDir(path, root) except LicenseError: # TODO(phajdan.jr): Convert to fatal error (http://crbug.com/39240). continue if metadata['License File'] == NOT_SHIPPED: continue env = { 'name': metadata['Name'], 'url': metadata['URL'], 'license': open(metadata['License File'], 'rb').read(), } entry = { 'name': metadata['Name'], 'content': EvaluateTemplate(entry_template, env), } entries.append(entry) entries.sort(key=lambda entry: (entry['name'], entry['content'])) entries_contents = '\n'.join([entry['content'] for entry in entries]) file_template = open(file_template_file).read() template_contents = "<!-- Generated by licenses.py; do not edit. -->" template_contents += EvaluateTemplate(file_template, {'entries': entries_contents}, escape=False) if output_file: with open(output_file, 'w') as output: output.write(template_contents) else: print template_contents return True def main(): parser = argparse.ArgumentParser() parser.add_argument('--file-template', help='Template HTML to use for the license page.') parser.add_argument('--entry-template', help='Template HTML to use for each license.') parser.add_argument('command', choices=['help', 'scan', 'credits']) parser.add_argument('output_file', nargs='?') args = parser.parse_args() if args.command == 'scan': if not ScanThirdPartyDirs(): return 1 elif args.command == 'credits': if not GenerateCredits(args.file_template, args.entry_template, args.output_file): return 1 else: print __doc__ return 1 if __name__ == '__main__': sys.exit(main())
"""We raise this exception when a directory's licensing info isn't fully filled out.""" pass
add.go
// Copyright © 2017 PolySwarm <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "fmt" "os" "path/filepath" "regexp" "github.com/spf13/cobra" "github.com/inn4science/perigord/project" "github.com/inn4science/perigord/templates" ) var addCmd = &cobra.Command{ Use: "add", Short: "Add a new contract or test to the project", } var addContractCmd = &cobra.Command{ Use: "contract", Short: "Add a new contract to the project", Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { Fatal("Must specify contract name") } name := args[0] match, _ := regexp.MatchString("\\w+", name) if !match { Fatal("Invalid contract name specified") } project, err := project.FindProject() if err != nil { Fatal(err) } addContract(name, project) }, } var addMigrationCmd = &cobra.Command{ Use: "migration", Short: "Add a new migration to the project", Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { Fatal("Must specify migration name") } name := args[0]
match, _ := regexp.MatchString("\\w+", name) if !match { Fatal("Invalid migration name specified") } project, err := project.FindProject() if err != nil { Fatal(err) } addMigration(name, project) }, } var addTestCmd = &cobra.Command{ Use: "test", Short: "Add a new test to the project", Run: func(cmd *cobra.Command, args []string) { if len(args) != 1 { Fatal("Must specify test name") } name := args[0] match, _ := regexp.MatchString("\\w+", name) if !match { Fatal("Invalid test name specified") } project, err := project.FindProject() if err != nil { Fatal(err) } addTest(name, project) }, } func init() { addCmd.AddCommand(addContractCmd) addCmd.AddCommand(addMigrationCmd) addCmd.AddCommand(addTestCmd) RootCmd.AddCommand(addCmd) } func addContract(name string, prj *project.Project) { path := filepath.Join(prj.AbsPath(), project.ContractsDirectory, name+".sol") if err := os.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { Fatal(err) } data := prj.TemplateData() data["contract"] = name if err := templates.RestoreTemplate(path, "contract/contract.sol.tpl", data); err != nil { Fatal(err) } fmt.Println("New contract added at", path) } func addMigration(name string, prj *project.Project) { path := filepath.Join(prj.AbsPath(), project.MigrationsDirectory) glob, err := filepath.Glob(filepath.Join(path, "*.go")) numMigrations := 1 if err == nil { numMigrations += len(glob) } path = filepath.Join(path, fmt.Sprintf("%d_%s.go", numMigrations, name)) if err := os.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { Fatal(err) } data := prj.TemplateData() data["contract"] = name data["number"] = numMigrations if err := templates.RestoreTemplate(path, "migration/migration.go.tpl", data); err != nil { Fatal(err) } fmt.Println("New migration added at", path) } func addTest(name string, prj *project.Project) { path := filepath.Join(prj.AbsPath(), project.TestsDirectory, name+".go") if err := os.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { Fatal(err) } data := prj.TemplateData() data["test"] = name if err := templates.RestoreTemplate(path, "test/test.go.tpl", data); err != nil { Fatal(err) } fmt.Println("New test added at", path) }
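A usage sketch for the commands above (the binary name perigord is an assumption taken from the import path):

perigord add contract MyToken     # ContractsDirectory/MyToken.sol from contract/contract.sol.tpl
perigord add migration my_token   # MigrationsDirectory/<n>_my_token.go, numbered after existing migrations
perigord add test my_token        # TestsDirectory/my_token.go from test/test.go.tpl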
results.logics.ts
import {createLogic} from "redux-logic"; import actions from '../../../layout/actions'; import workProgramActions from '../actions'; import Service from '../service'; import {getWorkProgramId} from '../getters'; import {fetchingTypes, fields} from "../enum"; const service = new Service(); const addResult = createLogic({ type: workProgramActions.addResult.type, latest: true, process({getState, action}: any, dispatch, done) { const state = getState(); const workProgramId = getWorkProgramId(state); const result = action.payload; dispatch(actions.fetchingTrue({destination: fetchingTypes.ADD_RESULT})); service.addResult(result, workProgramId) .then((res) => { dispatch(workProgramActions.getWorkProgram(workProgramId)); // @ts-ignore dispatch(actions.fetchingSuccess()); dispatch(workProgramActions.closeDialog(fields.ADD_NEW_RESULT)); }) .catch((err) => { dispatch(actions.fetchingFailed(err)); }) .then(() => { dispatch(actions.fetchingFalse({destination: fetchingTypes.ADD_RESULT})); return done(); }); } }); const changeResult = createLogic({ type: workProgramActions.changeResult.type, latest: true, process({getState, action}: any, dispatch, done) { const state = getState(); const workProgramId = getWorkProgramId(state); const result = action.payload; dispatch(actions.fetchingTrue({destination: fetchingTypes.CHANGE_RESULT})); service.changeResult(result) .then((res) => { dispatch(workProgramActions.getWorkProgram(workProgramId)); // @ts-ignore dispatch(actions.fetchingSuccess()); dispatch(workProgramActions.closeDialog(fields.ADD_NEW_RESULT)); }) .catch((err) => { dispatch(actions.fetchingFailed(err)); }) .then(() => { dispatch(actions.fetchingFalse({destination: fetchingTypes.CHANGE_RESULT})); return done(); }); } }); const deleteResult = createLogic({ type: workProgramActions.deleteResult.type, latest: true, process({getState, action}: any, dispatch, done) { const state = getState(); const workProgramId = getWorkProgramId(state); const id = action.payload; dispatch(actions.fetchingTrue({destination: fetchingTypes.DELETE_RESULT})); service.deleteResult(id) .then((res) => { dispatch(workProgramActions.getWorkProgram(workProgramId)); // @ts-ignore dispatch(actions.fetchingSuccess()); }) .catch((err) => { dispatch(actions.fetchingFailed(err)); }) .then(() => { dispatch(actions.fetchingFalse({destination: fetchingTypes.DELETE_RESULT})); return done(); }); } }); const getResults = createLogic({ type: workProgramActions.getResults.type, latest: true, process({getState, action}: any, dispatch, done) { const workProgramId = action.payload; dispatch(actions.fetchingTrue({destination: fetchingTypes.GET_RESULTS}));
dispatch(actions.fetchingSuccess()); }) .catch((err) => { dispatch(actions.fetchingFailed(err)); }) .then(() => { dispatch(actions.fetchingFalse({destination: fetchingTypes.GET_RESULTS})); return done(); }); } }); export default [ addResult, changeResult, deleteResult, getResults, ];
service.getResults(workProgramId) .then((res) => { dispatch(workProgramActions.setResults(res.data)); // @ts-ignore
main.rs
//! See https://github.com/matklad/cargo-xtask/. //! //! This binary defines various auxiliary build commands, which are not //! expressible with just `cargo`. Notably, it provides `cargo xtask codegen` //! for code generation and `cargo xtask install` for installation of //! rust-analyzer server and client. //! //! This binary is integrated into the `cargo` command line by using an alias in //! `.cargo/config`. use std::env; use pico_args::Arguments; use xtask::{ codegen::{self, Mode}, dist::run_dist, install::{ClientOpt, InstallCmd, ServerOpt}, not_bash::pushd, pre_commit, project_root, release::ReleaseCmd, run_clippy, run_fuzzer, run_pre_cache, run_rustfmt, Result, }; fn main() -> Result<()> { if env::args().next().map(|it| it.contains("pre-commit")) == Some(true) { return pre_commit::run_hook(); } let _d = pushd(project_root()); let mut args = Arguments::from_env(); let subcommand = args.subcommand()?.unwrap_or_default(); match subcommand.as_str() { "install" => { if args.contains(["-h", "--help"]) { eprintln!( "\ cargo xtask install Install rust-analyzer server or editor plugin. USAGE: cargo xtask install [FLAGS] FLAGS: --client-code Install only VS Code plugin --server Install only the language server --jemalloc Use jemalloc for server -h, --help Prints help information " ); return Ok(()); } let server = args.contains("--server"); let client_code = args.contains("--client-code"); if server && client_code { eprintln!( "error: The argument `--server` cannot be used with `--client-code`\n\n\ For more information try --help" ); return Ok(()); } let jemalloc = args.contains("--jemalloc"); args.finish()?; InstallCmd { client: if server { None } else { Some(ClientOpt::VsCode) }, server: if client_code { None } else { Some(ServerOpt { jemalloc }) }, } .run() } "codegen" => { args.finish()?; codegen::generate_syntax(Mode::Overwrite)?; codegen::generate_parser_tests(Mode::Overwrite)?; codegen::generate_assists_tests(Mode::Overwrite)?; codegen::generate_assists_docs(Mode::Overwrite)?; codegen::generate_feature_docs(Mode::Overwrite)?; Ok(())
args.finish()?; run_rustfmt(Mode::Overwrite) } "install-pre-commit-hook" => { args.finish()?; pre_commit::install_hook() } "lint" => { args.finish()?; run_clippy() } "fuzz-tests" => { args.finish()?; run_fuzzer() } "pre-cache" => { args.finish()?; run_pre_cache() } "release" => { let dry_run = args.contains("--dry-run"); args.finish()?; ReleaseCmd { dry_run }.run() } "dist" => { let nightly = args.contains("--nightly"); let client_version: Option<String> = args.opt_value_from_str("--client")?; args.finish()?; run_dist(nightly, client_version) } _ => { eprintln!( "\ cargo xtask Run custom build command. USAGE: cargo xtask <SUBCOMMAND> SUBCOMMANDS: format install-pre-commit-hook fuzz-tests codegen install lint dist" ); Ok(()) } } }
} "format" => {
scoreboard.py
from turtle import Turtle FONT = ("Courier", 20, "normal") class Scoreboard(Turtle): def
(self): super().__init__() self.hideturtle() self.penup() self.level = 1 self.goto(x=-230, y=260) self.update_scoreboard() def update_scoreboard(self): self.clear() self.write(f"Level: {self.level}", align="center", font=FONT) def increase_level(self): self.level += 1 self.update_scoreboard() def game_over(self): self.goto(0, 0) self.write("GAME OVER", align="center", font=FONT)
__init__
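A minimal driver showing how this scoreboard is typically wired into a turtle game loop (the screen dimensions are assumptions):

from turtle import Screen

screen = Screen()
screen.setup(width=600, height=600)

scoreboard = Scoreboard()    # draws "Level: 1" near the top-left corner
scoreboard.increase_level()  # clears and redraws with the new level
scoreboard.game_over()       # writes GAME OVER at the center
screen.exitonclick()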
test_util.py
from htmltreediff.diff_core import Differ from htmltreediff.edit_script_runner import EditScriptRunner from htmltreediff.changes import ( split_text_nodes, sort_del_before_ins, _strip_changes_new, _strip_changes_old, ) from htmltreediff.util import ( minidom_tostring, node_compare, parse_minidom, remove_dom_attributes, walk_dom, ) def reverse_edit_script(edit_script): if edit_script is None: return None def opposite_action(action): if action == 'delete': return 'insert' elif action == 'insert': return 'delete' reverse_script = [] for action, location, node_properties in reversed(edit_script): reverse_script.append( (opposite_action(action), location, node_properties), ) return reverse_script def reverse_changes_html(changes): dom = parse_minidom(changes) reverse_changes(dom) return minidom_tostring(dom) def reverse_changes(dom): nodes = dom.getElementsByTagName('del') + dom.getElementsByTagName('ins') for node in nodes: if node.tagName == 'del': node.tagName = 'ins' elif node.tagName == 'ins': node.tagName = 'del' sort_del_before_ins(dom) def get_edit_script(old_html, new_html): old_dom = parse_minidom(old_html) new_dom = parse_minidom(new_html) split_text_nodes(old_dom) split_text_nodes(new_dom) differ = Differ(old_dom, new_dom) return differ.get_edit_script() def html_patch(old_html, edit_script): old_dom = parse_minidom(old_html) split_text_nodes(old_dom) runner = EditScriptRunner(old_dom, edit_script) return minidom_tostring(runner.run_edit_script()) def strip_changes_old(html): dom = parse_minidom(html) _strip_changes_old(dom) return minidom_tostring(dom) def strip_changes_new(html): dom = parse_minidom(html) _strip_changes_new(dom) return minidom_tostring(dom)
def remove_attributes(html): dom = parse_minidom(html) remove_dom_attributes(dom) return minidom_tostring(dom) def collapse(html): """Remove any indentation and newlines from the html.""" return ''.join([line.strip() for line in html.split('\n')]).strip() class Case(object): pass def parse_cases(cases): for args in cases: case = Case() if len(args) == 4: case.name, case.old_html, case.new_html, case.target_changes = args case.edit_script = None elif len(args) == 5: ( case.name, case.old_html, case.new_html, case.target_changes, case.edit_script, ) = args else: raise ValueError('Invalid test spec: %r' % (args,)) yield case def test_node_compare(): del_node = list(walk_dom(parse_minidom('<del/>')))[-1] ins_node = list(walk_dom(parse_minidom('<ins/>')))[-1] assert -1 == node_compare(del_node, ins_node) assert 1 == node_compare(ins_node, del_node)
root.go
package cmd import ( "errors" "io" "os" "path/filepath" "github.com/cosmos/cosmos-sdk/snapshots" "github.com/worryFree56/cosmos-trade/app/params" "github.com/spf13/cast" "github.com/spf13/cobra" "github.com/spf13/pflag" tmcli "github.com/tendermint/tendermint/libs/cli" "github.com/tendermint/tendermint/libs/log" dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/debug" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/keys" "github.com/cosmos/cosmos-sdk/client/rpc" "github.com/cosmos/cosmos-sdk/server" servertypes "github.com/cosmos/cosmos-sdk/server/types" "github.com/cosmos/cosmos-sdk/store" sdk "github.com/cosmos/cosmos-sdk/types" authclient "github.com/cosmos/cosmos-sdk/x/auth/client" authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli" "github.com/cosmos/cosmos-sdk/x/auth/types" vestingcli "github.com/cosmos/cosmos-sdk/x/auth/vesting/client/cli" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" "github.com/cosmos/cosmos-sdk/x/crisis" genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli" "github.com/worryFree56/cosmos-trade/app" // this line is used by starport scaffolding # stargate/root/import ) var ChainID string // NewRootCmd creates a new root command for simd. It is called once in the // main function. func NewRootCmd() (*cobra.Command, params.EncodingConfig) { // Set config for prefixes app.SetConfig() encodingConfig := app.MakeEncodingConfig() initClientCtx := client.Context{}. WithJSONMarshaler(encodingConfig.Marshaler). WithInterfaceRegistry(encodingConfig.InterfaceRegistry). WithTxConfig(encodingConfig.TxConfig). WithLegacyAmino(encodingConfig.Amino). WithInput(os.Stdin). WithAccountRetriever(types.AccountRetriever{}). WithBroadcastMode(flags.BroadcastBlock). 
WithHomeDir(app.DefaultNodeHome) rootCmd := &cobra.Command{ Use: app.Name + "d", Short: "Stargate CosmosHub App", PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { if err := client.SetCmdClientContextHandler(initClientCtx, cmd); err != nil { return err } return server.InterceptConfigsPreRunHandler(cmd) }, } initRootCmd(rootCmd, encodingConfig) overwriteFlagDefaults(rootCmd, map[string]string{ flags.FlagChainID: ChainID, flags.FlagKeyringBackend: "test", }) return rootCmd, encodingConfig } func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) { authclient.Codec = encodingConfig.Marshaler rootCmd.AddCommand( genutilcli.InitCmd(app.ModuleBasics, app.DefaultNodeHome), genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, app.DefaultNodeHome), genutilcli.MigrateGenesisCmd(), genutilcli.GenTxCmd(app.ModuleBasics, encodingConfig.TxConfig, banktypes.GenesisBalancesIterator{}, app.DefaultNodeHome), genutilcli.ValidateGenesisCmd(app.ModuleBasics), AddGenesisAccountCmd(app.DefaultNodeHome), tmcli.NewCompletionCmd(rootCmd, true), debug.Cmd(), // this line is used by starport scaffolding # stargate/root/commands ) a := appCreator{encodingConfig} server.AddCommands(rootCmd, app.DefaultNodeHome, a.newApp, a.appExport, addModuleInitFlags) // add keybase, auxiliary RPC, query, and tx child commands rootCmd.AddCommand( rpc.StatusCommand(), queryCommand(), txCommand(), keys.Commands(app.DefaultNodeHome), ) } func addModuleInitFlags(startCmd *cobra.Command) { crisis.AddModuleInitFlags(startCmd) // this line is used by starport scaffolding # stargate/root/initFlags } func queryCommand() *cobra.Command { cmd := &cobra.Command{ Use: "query", Aliases: []string{"q"}, Short: "Querying subcommands", DisableFlagParsing: true, SuggestionsMinimumDistance: 2, RunE: client.ValidateCmd, } cmd.AddCommand( authcmd.GetAccountCmd(), rpc.ValidatorCommand(), rpc.BlockCommand(), authcmd.QueryTxsByEventsCmd(), authcmd.QueryTxCmd(), ) app.ModuleBasics.AddQueryCommands(cmd) cmd.PersistentFlags().String(flags.FlagChainID, "", "The network chain ID") return cmd } func txCommand() *cobra.Command { cmd := &cobra.Command{ Use: "tx", Short: "Transactions subcommands", DisableFlagParsing: true, SuggestionsMinimumDistance: 2, RunE: client.ValidateCmd, } cmd.AddCommand( authcmd.GetSignCommand(), authcmd.GetSignBatchCommand(), authcmd.GetMultiSignCommand(), authcmd.GetValidateSignaturesCommand(), flags.LineBreak, authcmd.GetBroadcastCommand(), authcmd.GetEncodeCommand(), authcmd.GetDecodeCommand(), flags.LineBreak, vestingcli.GetTxCmd(), ) app.ModuleBasics.AddTxCommands(cmd) cmd.PersistentFlags().String(flags.FlagChainID, "", "The network chain ID") return cmd } type appCreator struct { encCfg params.EncodingConfig } // newApp is an AppCreator func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, appOpts servertypes.AppOptions) servertypes.Application { var cache sdk.MultiStorePersistentCache if cast.ToBool(appOpts.Get(server.FlagInterBlockCache)) { cache = store.NewCommitKVStoreCacheManager() } skipUpgradeHeights := make(map[int64]bool) for _, h := range cast.ToIntSlice(appOpts.Get(server.FlagUnsafeSkipUpgrades)) { skipUpgradeHeights[int64(h)] = true } pruningOpts, err := server.GetPruningOptionsFromFlags(appOpts) if err != nil { panic(err) } snapshotDir := filepath.Join(cast.ToString(appOpts.Get(flags.FlagHome)), "data", "snapshots") snapshotDB, err := sdk.NewLevelDB("metadata", snapshotDir) if err != nil { panic(err) } snapshotStore, err := 
snapshots.NewStore(snapshotDB, snapshotDir) if err != nil { panic(err) } // this line is used by starport scaffolding # stargate/root/appBeforeInit return app.New( logger, db, traceStore, true, skipUpgradeHeights, cast.ToString(appOpts.Get(flags.FlagHome)), cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)), a.encCfg, // this line is used by starport scaffolding # stargate/root/appArgument appOpts, baseapp.SetPruning(pruningOpts), baseapp.SetMinGasPrices(cast.ToString(appOpts.Get(server.FlagMinGasPrices))), baseapp.SetMinRetainBlocks(cast.ToUint64(appOpts.Get(server.FlagMinRetainBlocks))), baseapp.SetHaltHeight(cast.ToUint64(appOpts.Get(server.FlagHaltHeight))), baseapp.SetHaltTime(cast.ToUint64(appOpts.Get(server.FlagHaltTime))), baseapp.SetInterBlockCache(cache), baseapp.SetTrace(cast.ToBool(appOpts.Get(server.FlagTrace))), baseapp.SetIndexEvents(cast.ToStringSlice(appOpts.Get(server.FlagIndexEvents))), baseapp.SetSnapshotStore(snapshotStore), baseapp.SetSnapshotInterval(cast.ToUint64(appOpts.Get(server.FlagStateSyncSnapshotInterval))), baseapp.SetSnapshotKeepRecent(cast.ToUint32(appOpts.Get(server.FlagStateSyncSnapshotKeepRecent))), ) } // appExport creates a new simapp (optionally at a given height) func (a appCreator) appExport( logger log.Logger, db dbm.DB, traceStore io.Writer, height int64, forZeroHeight bool, jailAllowedAddrs []string, appOpts servertypes.AppOptions) (servertypes.ExportedApp, error) { var anApp *app.App homePath, ok := appOpts.Get(flags.FlagHome).(string) if !ok || homePath == "" { return servertypes.ExportedApp{}, errors.New("application home not set") } if height != -1
else { anApp = app.New( logger, db, traceStore, true, map[int64]bool{}, homePath, uint(1), a.encCfg, // this line is used by starport scaffolding # stargate/root/noHeightExportArgument appOpts, ) } return anApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs) } func overwriteFlagDefaults(c *cobra.Command, defaults map[string]string) { set := func(s *pflag.FlagSet, key, val string) { if f := s.Lookup(key); f != nil { f.DefValue = val f.Value.Set(val) } } for key, val := range defaults { set(c.Flags(), key, val) set(c.PersistentFlags(), key, val) } for _, c := range c.Commands() { overwriteFlagDefaults(c, defaults) } }
{ anApp = app.New( logger, db, traceStore, false, map[int64]bool{}, homePath, uint(1), a.encCfg, // this line is used by starport scaffolding # stargate/root/exportArgument appOpts, ) if err := anApp.LoadHeight(height); err != nil { return servertypes.ExportedApp{}, err } }
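Once compiled, the commands registered above give the daemon the usual Cosmos SDK CLI surface; a hedged sketch, with appd standing in for the real binary name (app.Name + "d", which is defined elsewhere):

appd init my-node --chain-id my-chain   # genutilcli.InitCmd
appd keys add validator                 # keyring commands (test backend by default)
appd start                              # via server.AddCommands
appd query ... / appd tx ...            # module subcommands from ModuleBasics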
storage_s3.go
package cli import ( "context" "gopkg.in/alecthomas/kingpin.v2" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/blob/s3" ) func
() { var s3options s3.Options RegisterStorageConnectFlags( "s3", "an S3 bucket", func(cmd *kingpin.CmdClause) { cmd.Flag("bucket", "Name of the S3 bucket").Required().StringVar(&s3options.BucketName) cmd.Flag("endpoint", "Endpoint to use").Default("s3.amazonaws.com").StringVar(&s3options.Endpoint) cmd.Flag("region", "S3 Region").Default("").StringVar(&s3options.Region) cmd.Flag("access-key", "Access key ID (overrides AWS_ACCESS_KEY_ID environment variable)").Required().Envar("AWS_ACCESS_KEY_ID").StringVar(&s3options.AccessKeyID) cmd.Flag("secret-access-key", "Secret access key (overrides AWS_SECRET_ACCESS_KEY environment variable)").Required().Envar("AWS_SECRET_ACCESS_KEY").StringVar(&s3options.SecretAccessKey) cmd.Flag("session-token", "Session token (overrides AWS_SESSION_TOKEN environment variable)").Envar("AWS_SESSION_TOKEN").StringVar(&s3options.SessionToken) cmd.Flag("prefix", "Prefix to use for objects in the bucket").StringVar(&s3options.Prefix) cmd.Flag("disable-tls", "Disable TLS security (HTTPS)").BoolVar(&s3options.DoNotUseTLS) cmd.Flag("max-download-speed", "Limit the download speed.").PlaceHolder("BYTES_PER_SEC").IntVar(&s3options.MaxDownloadSpeedBytesPerSecond) cmd.Flag("max-upload-speed", "Limit the upload speed.").PlaceHolder("BYTES_PER_SEC").IntVar(&s3options.MaxUploadSpeedBytesPerSecond) }, func(ctx context.Context, isNew bool) (blob.Storage, error) { return s3.New(ctx, &s3options) }, ) }
init
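These flags surface under kopia's repository commands via RegisterStorageConnectFlags; a plausible invocation (bucket name and credentials are placeholders):

kopia repository create s3 --bucket=my-backups --access-key=AKIAEXAMPLE --secret-access-key=...
kopia repository connect s3 --bucket=my-backups --prefix=laptop/ --region=eu-west-1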
datatables.js
var default_per_page = typeof default_per_page !== 'undefined' ? default_per_page : 25; var oTable = null; var oTableArray = []; var oTableMapping = []; // HTML5 Storage. You can ignore this. function supports_html5_storage() { try { JSON.parse("{}"); return 'localStorage' in window && window['localStorage'] !== null; } catch (e) { return false; } } var use_storage = supports_html5_storage(); var aButtons = []; var mColumns = []; $(document).ready(function() { // extracting column name from th $('table.groceryCrudTable thead tr th').each(function(index){ if(!$(this).hasClass('actions')) { mColumns[index] = index; } }); //For mutliplegrids disable bStateSave as it is causing many problems if ($('.groceryCrudTable').length > 1) { use_storage = false; } // For each groceryCrud Table, Create New Datatables $('.groceryCrudTable').each(function(index){ if (typeof oTableArray[index] !== 'undefined') { return false; } // Save table unique id and connect it to table index oTableMapping[$(this).attr('id')] = index; // Save Datatables Instance paired with table index oTableArray[index] = loadDataTable(this); }); loadListenersForDatatables(); $('a.ui-button').on("mouseover mouseout", function(event) { if ( event.type == "mouseover" ) { $(this).addClass('ui-state-hover'); } else { $(this).removeClass('ui-state-hover'); } }); $('th.actions').unbind('click'); $('th.actions>div .DataTables_sort_icon').remove(); } ); function
() { // filter function for each column $(".filter-column-input").keyup( function () { chosen_table = datatables_get_chosen_table($(this).closest('.groceryCrudTable')); chosen_table.fnFilter( this.value, chosen_table.find("tfoot input").index(this) ); if(use_storage) { var search_values_array = []; chosen_table.find("tfoot tr th").each(function(index,value){ search_values_array[index] = $(this).children(':first').val(); }); localStorage.setItem( 'datatables_search_'+ unique_hash ,'["' + search_values_array.join('","') + '"]'); } } ); var search_values = localStorage.getItem('datatables_search_'+ unique_hash); if( search_values !== null) { $.each($.parseJSON(search_values),function(num,val){ if(val !== '') { $(".groceryCrudTable tfoot tr th:eq("+num+")").children(':first').val(val); } }); } // clear filter event $('.clear-filtering').click(function(){ localStorage.removeItem( 'DataTables_' + unique_hash); localStorage.removeItem( 'datatables_search_'+ unique_hash); chosen_table = datatables_get_chosen_table($(this).closest('.groceryCrudTable')); chosen_table.fnFilterClear(); $(this).closest('.groceryCrudTable').find("tfoot tr th input").val(""); }); $('.refresh-data').click(function(){ // Retrieve Old Table Information var this_container = $(this).closest('.refresh-holder'); var oldUniqid = this_container.find('.groceryCrudTable').attr('id'); var oldIndex = oTableMapping[oldUniqid]; // Create New Container var new_container = $("<div/>").addClass('refresh-holder'); // Clear Table html5 Storage localStorage.removeItem( 'DataTables_' + unique_hash); localStorage.removeItem( 'datatables_search_'+ unique_hash); // Replace Old Container with New one. this_container.after(new_container); this_container.fadeOut("fast", function() { this_container.remove(); }); // Ask GroceryCRUD for new Table $.ajax({ url: $(this).attr('data-url'), success: function(my_output){ // Install new Table new_container.html(my_output); // Replace Old Table Information with new Table UniqueId oTableArray[oldIndex] = loadDataTable(new_container.find('table')); oTableMapping[new_container.find('.groceryCrudTable').attr('id')] = oldIndex; // Remove Old Table UniqueId from Array delete oTableMapping[oldUniqid]; // Clear Filter Input and assign new listeners new_container.find("tfoot tr th input").val(""); loadListenersForDatatables(); } }); }); } // Install Datatables to designed table. 
Not much changed from original GroceryCRUD Datatables Theme function loadDataTable(this_datatables) { return $(this_datatables).dataTable({ "bJQueryUI": true, "sPaginationType": "numbers", "bStateSave": use_storage, "fnStateSave": function (oSettings, oData) { localStorage.setItem( 'DataTables_' + unique_hash, JSON.stringify(oData) ); }, "fnStateLoad": function (oSettings) { return JSON.parse( localStorage.getItem('DataTables_'+unique_hash) ); }, "iDisplayLength": default_per_page, "aaSorting": datatables_aaSorting, "fnInitComplete" : function () { $('.DTTT_button_text').attr('download', ''); $('.DTTT_button_text').attr('href', export_url); }, "oLanguage":{ "sProcessing": list_loading, "sLengthMenu": show_entries_string, "sZeroRecords": list_no_items, "sInfo": displaying_paging_string, "sInfoEmpty": list_zero_entries, "sInfoFiltered": filtered_from_string, "sSearch": search_string+":", "oPaginate": { "sFirst": paging_first, "sPrevious": paging_previous, "sNext": paging_next, "sLast": paging_last } }, "bDestroy": true, "bRetrieve": true, "fnDrawCallback": function() { //If there is no thumbnail this means that the fancybox library doesn't exist if ($('.image-thumbnail').length > 0) { $('.image-thumbnail').fancybox({ 'transitionIn': 'elastic', 'transitionOut': 'elastic', 'speedIn': 600, 'speedOut': 200, 'overlayShow': false }); } add_edit_button_listener(); $('.DTTT_button_text').attr('href', export_url); }, "sDom": '<"table-responsive"<"spacer visible-xs-block"><"row container-fluid"<"col-md-3"l><"col-md-9 hidden-xs"f>>t><"panel-footer"ip>', "oTableTools": { "sSwfPath": base_url+"assets/grocery_crud/themes/datatables/extras/TableTools/media/swf/copy_csv_xls_pdf.swf" } }); } // Get Datatable Instance. Original from GroceryCRUD Datatables Theme function datatables_get_chosen_table(table_as_object){ chosen_table_index = oTableMapping[table_as_object.attr('id')]; return oTableArray[chosen_table_index]; } // Process Delete Button. Using Pnotify by sciactive for Confirmation Dialog and Success Message function delete_row(delete_url , row_id){ notice = new PNotify({ title: 'Confirm Action', text: message_alert_delete, icon: 'glyphicon glyphicon-question-sign', hide: false, confirm: { confirm: true }, buttons: { closer: false, sticker: false }, history: { history: false }, addclass: 'stack-modal', stack: {'dir1': 'down', 'dir2': 'right', 'modal': true} }); notice.get().on('pnotify.confirm', function() { $.ajax({ url: delete_url, dataType: 'json', success: function(data){ if(data.success){ new PNotify({ title: 'Success!', text: data.success_message, type: 'success' }); chosen_table = datatables_get_chosen_table($('tr#row-'+row_id).closest('.groceryCrudTable')); $('tr#row-'+row_id).addClass('row_selected'); var anSelected = fnGetSelected( chosen_table ); chosen_table.fnDeleteRow( anSelected[0] ); } else { new PNotify({ title: 'ERROR!', text: data.error_message, type: 'error' }); } } }); }); return false; } // Get Selected Datatable. Original from GroceryCRUD Datatables Theme function fnGetSelected( oTableLocal ){ var aReturn = new Array(); var aTrs = oTableLocal.fnGetNodes(); for ( var i=0 ; i<aTrs.length ; i++ ){ if ( $(aTrs[i]).hasClass('row_selected') ){ aReturn.push( aTrs[i] ); } } return aReturn; }
loadListenersForDatatables
id.go
package commands import ( "encoding/base64" "encoding/json" "errors" "fmt" "io" "strings" core "github.com/ipfs/go-ipfs/core" cmdenv "github.com/ipfs/go-ipfs/core/commands/cmdenv" cmds "github.com/ipfs/go-ipfs-cmds" ic "github.com/libp2p/go-libp2p-crypto" kb "github.com/libp2p/go-libp2p-kbucket" peer "github.com/libp2p/go-libp2p-peer" pstore "github.com/libp2p/go-libp2p-peerstore" identify "github.com/libp2p/go-libp2p/p2p/protocol/identify" ) const offlineIdErrorMessage = `'ipfs id' currently cannot query information on remote peers without a running daemon; we are working to fix this. In the meantime, if you want to query remote peers using 'ipfs id', please run the daemon: ipfs daemon & ipfs id QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ ` type IdOutput struct { ID string PublicKey string Addresses []string AgentVersion string ProtocolVersion string } const ( formatOptionName = "format" ) var IDCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Show ipfs node id info.", ShortDescription: ` Prints out information about the specified peer. If no peer is specified, prints out information for local peers. 'ipfs id' supports the format option for output with the following keys: <id> : The peers id. <aver>: Agent version. <pver>: Protocol version. <pubkey>: Public key. <addrs>: Addresses (newline delimited). EXAMPLE: ipfs id Qmece2RkXhsKe5CRooNisBTh4SK119KrXXGmoK6V3kb8aH -f="<addrs>\n" `, }, Arguments: []cmds.Argument{ cmds.StringArg("peerid", false, false, "Peer.ID of node to look up."), }, Options: []cmds.Option{ cmds.StringOption(formatOptionName, "f", "Optional output format."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { n, err := cmdenv.GetNode(env) if err != nil { return err } var id peer.ID if len(req.Arguments) > 0 { var err error id, err = peer.IDB58Decode(req.Arguments[0]) if err != nil { return fmt.Errorf("invalid peer id") } } else { id = n.Identity } if id == n.Identity
// TODO handle offline mode with polymorphism instead of conditionals if !n.IsOnline { return errors.New(offlineIdErrorMessage) } p, err := n.Routing.FindPeer(req.Context, id) if err == kb.ErrLookupFailure { return errors.New(offlineIdErrorMessage) } if err != nil { return err } output, err := printPeer(n.Peerstore, p.ID) if err != nil { return err } return cmds.EmitOnce(res, output) }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *IdOutput) error { format, found := req.Options[formatOptionName].(string) if found { output := format output = strings.Replace(output, "<id>", out.ID, -1) output = strings.Replace(output, "<aver>", out.AgentVersion, -1) output = strings.Replace(output, "<pver>", out.ProtocolVersion, -1) output = strings.Replace(output, "<pubkey>", out.PublicKey, -1) output = strings.Replace(output, "<addrs>", strings.Join(out.Addresses, "\n"), -1) output = strings.Replace(output, "\\n", "\n", -1) output = strings.Replace(output, "\\t", "\t", -1) fmt.Fprint(w, output) } else { marshaled, err := json.MarshalIndent(out, "", "\t") if err != nil { return err } marshaled = append(marshaled, byte('\n')) fmt.Fprintln(w, string(marshaled)) } return nil }), }, Type: IdOutput{}, } func printPeer(ps pstore.Peerstore, p peer.ID) (interface{}, error) { if p == "" { return nil, errors.New("attempted to print nil peer") } info := new(IdOutput) info.ID = p.Pretty() if pk := ps.PubKey(p); pk != nil { pkb, err := ic.MarshalPublicKey(pk) if err != nil { return nil, err } info.PublicKey = base64.StdEncoding.EncodeToString(pkb) } for _, a := range ps.Addrs(p) { info.Addresses = append(info.Addresses, a.String()) } if v, err := ps.Get(p, "ProtocolVersion"); err == nil { if vs, ok := v.(string); ok { info.ProtocolVersion = vs } } if v, err := ps.Get(p, "AgentVersion"); err == nil { if vs, ok := v.(string); ok { info.AgentVersion = vs } } return info, nil } // printing self is special cased as we get values differently. func printSelf(node *core.IpfsNode) (interface{}, error) { info := new(IdOutput) info.ID = node.Identity.Pretty() pk := node.PrivateKey.GetPublic() pkb, err := ic.MarshalPublicKey(pk) if err != nil { return nil, err } info.PublicKey = base64.StdEncoding.EncodeToString(pkb) if node.PeerHost != nil { for _, a := range node.PeerHost.Addrs() { s := a.String() + "/ipfs/" + info.ID info.Addresses = append(info.Addresses, s) } } info.ProtocolVersion = identify.LibP2PVersion info.AgentVersion = identify.ClientVersion return info, nil }
{ output, err := printSelf(n) if err != nil { return err } return cmds.EmitOnce(res, output) }
utils.py
import sys from collections import Iterable from path2insight import WindowsFilePath, PosixFilePath PATH_OBJECT_TYPES = (WindowsFilePath, PosixFilePath) # ---------------------------------------------------- class VisibleDeprecationWarning(UserWarning):
class MissingDependencyError(Exception): """Optional dependency not available.""" pass def _import_jellyfish(): """Check if jellyfish is installed.""" try: from jellyfish import levenshtein_distance as lv return lv except ImportError: raise MissingDependencyError( "Install the module 'jellyfish' to compute string distances.") def iteritems(d): """Python 2, 3 compatibility.""" try: return d.items() except AttributeError: return d.iteritems() def unique(l): """Return a list with unique elements""" return list(set(l)) # ---------------------------------------------------- # the following part is largely based on / taken from the six module and pandas PY3 = (sys.version_info[0] >= 3) if PY3: string_types = str, binary_type = bytes else: string_types = basestring, binary_type = str string_and_binary_types = (string_types,) + (binary_type,) def is_list_like(obj): """ Check if the object is list-like. Objects that are considered list-like are for example Python lists, tuples, sets, NumPy arrays, and Pandas Series. Strings and datetime objects, however, are not considered list-like. Parameters ---------- obj : The object to check. Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_list_like([1, 2, 3]) True >>> is_list_like({1, 2, 3}) True >>> is_list_like(datetime(2017, 1, 1)) False >>> is_list_like("foo") False >>> is_list_like(1) False """ return (isinstance(obj, Iterable) and not isinstance(obj, string_types + (binary_type,)))
"""Visible deprecation warning. Based on numpy's VisibleDeprecationWarning. """ pass
tactics.py
from const import result import random C, D = True, False def opponent(r): if r == result.COOP or r == result.DEFECT: return True return False # tit for tat class Tft: def __init__(self) -> None: self.score = 0 self.last_reaction = C def run(self): return self.last_reaction def next(self, r): self.score += r.value self.last_reaction = opponent(r) def end(self): self.last_reaction = C return self.score # tit for two tat class Tftt: def __init__(self) -> None: self.score = 0 self.last_reaction = C self.last_last_reaction = C def run(self): return self.last_reaction | self.last_last_reaction def next(self, r): self.score += r.value self.last_last_reaction = self.last_reaction self.last_reaction = opponent(r) def end(self): self.last_reaction = C self.last_last_reaction = C return self.score # always coop class AlwaysCoop: def __init__(self) -> None: self.score = 0 def run(self): return C def next(self, r): self.score += r.value def end(self): return self.score # always defect class AlwaysDefect: def __init__(self) -> None: self.score = 0 def run(self): return D def next(self, r): self.score += r.value def end(self): return self.score # perfect random(50%) class Random: def __init__(self) -> None: self.score = 0 def run(self): return random.choice([C, D]) def next(self, r): self.score += r.value def end(self): return self.score # first defect, opponent coop rate - coop(>50%) / defect(<=50%) class Downing: def __init__(self) -> None: self.score = 0 self.game_count = 0 self.coop_count = 0 def run(self): if self.game_count == 0: return D if self.coop_count / self.game_count > 0.5: return C return D def next(self, r): self.score += r.value self.game_count += 1 if opponent(r): self.coop_count += 1 def end(self): self.game_count = self.coop_count = 0 return self.score # first coop, opponent coop rate - coop(>=50%) / defect(<50%) class Downing2: def __init__(self) -> None: self.score = 0 self.game_count = 0 self.coop_count = 0 def run(self): if self.game_count == 0: return C if self.coop_count / self.game_count >= 0.5:
return D def next(self, r): self.score += r.value self.game_count += 1 if opponent(r): self.coop_count += 1 def end(self): self.game_count = self.coop_count = 0 return self.score # coop, always defect once defected class Grudger: def __init__(self) -> None: self.score = 0 self.defected = False def run(self): if self.defected: return D return C def next(self, r): self.score += r.value if not opponent(r): self.defected = True def end(self): return self.score # tft but defect by 10% rate class Joss: def __init__(self) -> None: self.score = 0 self.last_reaction = C def run(self): if random.randint(1, 10) == 1: return D return self.last_reaction def next(self, r): self.score += r.value self.last_reaction = opponent(r) def end(self): self.last_reaction = C return self.score # wip class Tester: def __init__(self) -> None: self.score = 0 self.decision = True self.test_tft = False self.game_count = 0 def run(self): if self.game_count == 0: return D return self.decision def next(self, r): self.score += r.value if self.game_count == 1 and not opponent(r): self.test_tft = True elif self.test_tft: self.decision = opponent(r) elif self.game_count <= 2: self.decision = True else: self.decision = not self.decision self.game_count += 1 def end(self): self.decision = True self.test_tft = False self.game_count = 0 return self.score
return C