file_name (string, lengths 3–137) | prefix (string, lengths 0–918k) | suffix (string, lengths 0–962k) | middle (string, lengths 0–812k)
---|---|---|---|
GetIdentityDkimAttributesCommand.ts
|
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
MiddlewareStack,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
import { GetIdentityDkimAttributesRequest, GetIdentityDkimAttributesResponse } from "../models/models_0";
import {
deserializeAws_queryGetIdentityDkimAttributesCommand,
serializeAws_queryGetIdentityDkimAttributesCommand,
} from "../protocols/Aws_query";
import { ServiceInputTypes, ServiceOutputTypes, SESClientResolvedConfig } from "../SESClient";
export interface GetIdentityDkimAttributesCommandInput extends GetIdentityDkimAttributesRequest {}
export interface GetIdentityDkimAttributesCommandOutput extends GetIdentityDkimAttributesResponse, __MetadataBearer {}
/**
* <p>Returns the current status of Easy DKIM signing for an entity. For domain name
* identities, this operation also returns the DKIM tokens that are required for Easy DKIM
* signing, and whether Amazon SES has successfully verified that these tokens have been
* published.</p>
* <p>This operation takes a list of identities as input and returns the following
* information for each:</p>
* <ul>
* <li>
* <p>Whether Easy DKIM signing is enabled or disabled.</p>
* </li>
* <li>
* <p>A set of DKIM tokens that represent the identity. If the identity is an email
* address, the tokens represent the domain of that address.</p>
* </li>
* <li>
* <p>Whether Amazon SES has successfully verified the DKIM tokens published in the
* domain's DNS. This information is only returned for domain name identities, not
* for email addresses.</p>
* </li>
* </ul>
* <p>This operation is throttled at one request per second and can only get DKIM attributes
* for up to 100 identities at a time.</p>
* <p>For more information about creating DNS records using DKIM tokens, go to the <a href="https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html">Amazon SES Developer Guide</a>.</p>
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
* import { SESClient, GetIdentityDkimAttributesCommand } from "@aws-sdk/client-ses"; // ES Modules import
* // const { SESClient, GetIdentityDkimAttributesCommand } = require("@aws-sdk/client-ses"); // CommonJS import
* const client = new SESClient(config);
* const command = new GetIdentityDkimAttributesCommand(input);
* const response = await client.send(command);
* ```
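 *
 * An illustrative `input` shape (the identities below are hypothetical):
 * ```javascript
 * // { Identities: ["example.com", "user@example.com"] }
 * ```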
*
* @see {@link GetIdentityDkimAttributesCommandInput} for command's `input` shape.
* @see {@link GetIdentityDkimAttributesCommandOutput} for command's `response` shape.
 * @see {@link SESClientResolvedConfig | config} for SESClient's `config` shape.
*
*/
export class
|
extends $Command<
GetIdentityDkimAttributesCommandInput,
GetIdentityDkimAttributesCommandOutput,
SESClientResolvedConfig
> {
// Start section: command_properties
// End section: command_properties
constructor(readonly input: GetIdentityDkimAttributesCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
/**
* @internal
*/
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: SESClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<GetIdentityDkimAttributesCommandInput, GetIdentityDkimAttributesCommandOutput> {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration;
const clientName = "SESClient";
const commandName = "GetIdentityDkimAttributesCommand";
const handlerExecutionContext: HandlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: GetIdentityDkimAttributesRequest.filterSensitiveLog,
outputFilterSensitiveLog: GetIdentityDkimAttributesResponse.filterSensitiveLog,
};
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: GetIdentityDkimAttributesCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_queryGetIdentityDkimAttributesCommand(input, context);
}
private deserialize(
output: __HttpResponse,
context: __SerdeContext
): Promise<GetIdentityDkimAttributesCommandOutput> {
return deserializeAws_queryGetIdentityDkimAttributesCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
}
|
GetIdentityDkimAttributesCommand
|
App.js
|
import React from 'react';
import './App.css';
import UserList from './components/UserList';
function App() {
return (
|
}
export default App;
|
<div className="App">
<UserList />
</div>
);
|
maintainer.rs
|
// Copyright (c) 2018 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use clap::ArgMatches;
use serde_json;
use std::result;
use std::str::FromStr;
use std::string::ToString;
use url::Url;
use crate::export_docker::Result;
use crate::error::Error;
#[derive(Clone, Debug)]
pub struct Maintainer {
name: String,
email: Option<String>,
url: Option<String>,
}
impl Maintainer {
pub fn from_args(matches: &ArgMatches<'_>) -> Result<Vec<Self>>
|
pub fn to_json(&self) -> serde_json::Value {
json!({
"name": self.name,
"email": self.email,
"url": self.url,
})
}
}
impl FromStr for Maintainer {
type Err = Error;
/// Creates a `Maintainer` struct from a string representation, which must be of the format
/// `NAME[,EMAIL[,URL]]`.
///
/// # Errors
///
/// * `maintainer_str` is not of the format `NAME[,EMAIL[,URL]]`
/// * An invalid URL is specified
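/// # Example
///
/// An illustrative parse (the maintainer values are hypothetical):
///
/// ```ignore
/// let m: Maintainer = "Jane Doe,jane@example.com,https://example.com"
///     .parse()
///     .expect("valid maintainer spec");
/// ```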
fn from_str(maintainer_str: &str) -> result::Result<Self, Self::Err> {
let values: Vec<&str> = maintainer_str.split(',').collect();
if values.is_empty() || values.len() > 3 {
return Err(Error::InvalidMaintainer(maintainer_str.to_owned()));
}
let name = values[0].to_string();
// FIXME: Check validity of email address
let email = values.get(1).map(|&s| s.to_owned());
let url = values.get(2).map(|&s| s.to_owned());
if let Some(ref u) = url {
Url::parse(&u).map_err(|e| Error::InvalidUrl(u.to_owned(), format!("{}", e)))?;
};
Ok(Maintainer { name, email, url })
}
}
|
{
let mut maintainers = Vec::new();
if let Some(args) = matches.values_of("MAINTAINER") {
for arg in args {
let m = arg.parse::<Self>()?;
maintainers.push(m);
}
};
Ok(maintainers)
}
|
lib.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use std::{
env,
net::{SocketAddr, ToSocketAddrs},
path::PathBuf,
};
/// Get the environment variable or panic
fn get_var(name: &str) -> String {
env::var(name).unwrap_or_else(|_| panic!("{} environment variable is required.", name))
}
/// Convert a given host and port to a `SocketAddr` or panic
fn to_socket_addr(host: &str, port: &str) -> SocketAddr {
let raw_addr = format!("{}:{}", host, port);
let mut addrs_iter = raw_addr.to_socket_addrs().unwrap_or_else(|_| {
panic!(
"Address not parsable to SocketAddr. host: {}, port: {}",
host, port
)
});
addrs_iter
.next()
.expect("Could not convert to a SocketAddr")
}
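// Example (illustrative): to_socket_addr("127.0.0.1", "5672") resolves to
// 127.0.0.1:5672; a host or port that cannot be resolved panics as above.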
fn empty_str_to_none(x: String) -> Option<String> {
match x.as_ref() {
"" => None,
_ => Some(x),
}
}
fn string_to_bool(x: String) -> bool {
match x.as_ref() {
"true" => true,
_ => false,
}
}
/// Get anonymous read permission from the env or panic
pub fn get_allow_anonymous_read() -> bool {
string_to_bool(get_var("ALLOW_ANONYMOUS_READ"))
}
/// Get build num from the env or panic
pub fn get_build() -> String {
get_var("BUILD")
}
/// Get is release from the env or panic
pub fn get_is_release() -> bool {
string_to_bool(get_var("IS_RELEASE"))
}
/// Get version from the env or panic
pub fn get_version() -> String {
get_var("VERSION")
}
/// Get the broker URL from the env or panic
pub fn get_amqp_broker_url() -> String {
get_var("AMQP_BROKER_URL")
}
/// Get the broker user from the env or panic
pub fn get_user() -> String {
get_var("AMQP_BROKER_USER")
}
/// Get the broker password from the env or panic
pub fn get_password() -> String {
get_var("AMQP_BROKER_PASSWORD")
}
/// Get the broker vhost from the env or panic
pub fn get_vhost() -> String {
get_var("AMQP_BROKER_VHOST")
}
/// Get the broker host from the env or panic
pub fn get_host() -> String {
get_var("AMQP_BROKER_HOST")
}
/// Get the broker port from the env or panic
pub fn get_port() -> String {
get_var("AMQP_BROKER_PORT")
}
/// Get the IML API port from the env or panic
pub fn get_iml_api_port() -> String {
get_var("IML_API_PORT")
}
/// Get the IML API address from the env or panic
pub fn get_iml_api_addr() -> SocketAddr {
to_socket_addr(&get_server_host(), &get_iml_api_port())
}
/// Get the `http_agent2` port from the env or panic
pub fn get_http_agent2_port() -> String {
get_var("HTTP_AGENT2_PORT")
}
pub fn get_http_agent2_addr() -> SocketAddr {
to_socket_addr(&get_server_host(), &get_http_agent2_port())
}
/// Get the server host from the env or panic
pub fn get_server_host() -> String {
get_var("PROXY_HOST")
}
/// Get the AMQP server address or panic
pub fn get_addr() -> SocketAddr {
to_socket_addr(&get_host(), &get_port())
}
/// Get the warp drive port from the env or panic
pub fn get_warp_drive_port() -> String {
get_var("WARP_DRIVE_PORT")
}
/// Get the warp drive address from the env or panic
pub fn get_warp_drive_addr() -> SocketAddr {
to_socket_addr(&get_server_host(), &get_warp_drive_port())
}
/// Get the mailbox port from the env or panic
pub fn get_mailbox_port() -> String {
get_var("MAILBOX_PORT")
}
/// Get the mailbox address from the env or panic
pub fn get_mailbox_addr() -> SocketAddr {
to_socket_addr(&get_server_host(), &get_mailbox_port())
}
/// Get the timer port
pub fn get_timer_port() -> String {
get_var("TIMER_PORT")
}
/// Get the timer address from the env or panic
pub fn get_timer_addr() -> SocketAddr {
to_socket_addr(&get_server_host(), &get_timer_port())
}
/// Get the influxdb port from the env or panic
pub fn get_influxdb_port() -> String {
get_var("INFLUXDB_PORT")
}
/// Get the influxdb address from the env or panic
pub fn get_influxdb_addr() -> SocketAddr {
to_socket_addr(&get_server_host(), &get_influxdb_port())
}
/// Get the metrics influxdb database name
pub fn get_influxdb_metrics_db() -> String {
get_var("INFLUXDB_IML_STATS_DB")
}
/// Get the path to the mailbox from the env or panic
pub fn get_mailbox_path() -> PathBuf {
get_var("MAILBOX_PATH").into()
}
/// Get the devices port or panic
pub fn get_device_aggregator_port() -> String {
get_var("DEVICE_AGGREGATOR_PORT")
}
pub fn get_device_aggregator_addr() -> SocketAddr {
to_socket_addr(&get_server_host(), &get_device_aggregator_port())
}
/// Get the api key from the env or panic
pub fn get_api_key() -> String {
get_var("API_KEY")
}
/// Get the api user from the env or panic
pub fn get_api_user() -> String {
get_var("API_USER")
}
pub fn get_manager_url() -> String {
get_var("SERVER_HTTP_URL")
}
pub fn get_db_user() -> String {
get_var("DB_USER")
}
pub fn get_db_host() -> Option<String> {
empty_str_to_none(get_var("DB_HOST"))
}
pub fn get_db_name() -> Option<String> {
empty_str_to_none(get_var("DB_NAME"))
}
pub fn get_db_password() -> Option<String> {
empty_str_to_none(get_var("DB_PASSWORD"))
}
pub fn get_branding() -> String {
get_var("BRANDING")
}
pub fn get_use_stratagem() -> bool
|
/// Gets a connection string from the IML env
pub fn get_db_conn_string() -> String {
let mut xs = vec![format!("user={}", get_db_user())];
let host = match get_db_host() {
Some(x) => x,
None => "/var/run/postgresql".into(),
};
xs.push(format!("host={}", host));
if let Some(x) = get_db_name() {
xs.push(format!("dbname={}", x));
}
if let Some(x) = get_db_password() {
xs.push(format!("password={}", x));
}
xs.join(" ")
}
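// Worked example (illustrative values): with DB_USER="chroma", DB_HOST="" and
// DB_NAME="chroma", `get_db_host()` yields `None`, the host falls back to the
// local postgres socket directory, and the result is:
// "user=chroma host=/var/run/postgresql dbname=chroma"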
|
{
string_to_bool(get_var("USE_STRATAGEM"))
}
|
RendererFactory.ts
|
import {OptionsProps} from "../app/Options";
import Renderer, { RendererProps } from "./Renderer";
import ElectronRenderer from "./electron/ElectronRenderer";
import NodeRenderer from "./node/NodeRenderer";
export interface RendererOptions {
tmpName?: string
options: OptionsProps
}
export default class
|
{
/**
 * Creates a renderer for the given options: an `ElectronRenderer` when
 * `options.options.renderer` is set, otherwise a `NodeRenderer`.
 * @param options renderer configuration
 * @return the created renderer
 */
static createRenderer<T extends RendererProps>(options: RendererOptions): Renderer<T> {
if(options.options.renderer) {
return new ElectronRenderer(options);
} else {
return new NodeRenderer(options);
}
}
}
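// Usage (illustrative; `opts` is a hypothetical OptionsProps value):
//   const renderer = RendererFactory.createRenderer<RendererProps>({ options: opts });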
|
RendererFactory
|
http_handler.go
|
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap
import (
"encoding/json"
"fmt"
"github.com/studyzy/net/http"
"go.uber.org/zap/zapcore"
)
// ServeHTTP is a simple JSON endpoint that can report on or change the current
// logging level.
//
// GET requests return a JSON description of the current logging level. PUT
// requests change the logging level and expect a payload like:
// {"level":"info"}
//
// It's perfectly safe to change the logging level while a program is running.
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
type errorResponse struct {
Error string `json:"error"`
}
type payload struct {
Level *zapcore.Level `json:"level"`
}
enc := json.NewEncoder(w)
switch r.Method {
case http.MethodGet:
current := lvl.Level()
enc.Encode(payload{Level: &current})
case http.MethodPut:
var req payload
if errmess := func() string {
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return fmt.Sprintf("Request body must be well-formed JSON: %v", err)
}
if req.Level == nil {
return "Must specify a logging level."
}
return ""
}(); errmess != "" {
w.WriteHeader(http.StatusBadRequest)
enc.Encode(errorResponse{Error: errmess})
return
}
lvl.SetLevel(*req.Level)
enc.Encode(req)
default:
w.WriteHeader(http.StatusMethodNotAllowed)
enc.Encode(errorResponse{
Error: "Only GET and PUT are supported.",
})
}
}
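// Example (illustrative sketch): AtomicLevel satisfies http.Handler via the
// method above, so it can be mounted directly; the route is hypothetical.
//
//	lvl := NewAtomicLevel()
//	http.Handle("/log/level", lvl)
//
// A GET then reports the current level, e.g. {"level":"info"}, and a PUT with
// body {"level":"debug"} changes it and echoes the new payload.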
|
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
SwitchContent.tsx
|
/*
** StandoffCase Copyright (C) 2020 sunaiclub
** Full License is in the root directory
*/
// SCSS
import "../../../assets/scss/components/switch-content.scss"
// STAFF
import { Fragment, ReactElement, useEffect, useMemo, useState } from "react";
import { classWithModifiers } from "../../../resources/utils";
import { RegularObject } from "../../../resources/interfaces/Object";
interface SwitchContentProps {
menu: string | string[]
type?: "dependent" | "custom-content"
style?: string
defaultValue?: string
children: ReactElement<SwitchContentRouteProps>[]
}
interface SwitchContentRouteProps {
path?: string
hint?: any
children: any
}
export default function SwitchContent({ menu: rawMenu, type, style, defaultValue, children }: SwitchContentProps) {
const menu = typeof rawMenu === "string" ? rawMenu.split(", ") : rawMenu
const [items, setItems] = useState(menu)
const [choice, Choose] = useState(defaultValue || items[0])
useEffect(() => setItems(menu), [menu])
return (
<div className={classWithModifiers("switch-content", [type])}>
<div className={classWithModifiers("switch-content-menu", [style])}>
{items.map((item, index) => (
<div className={classWithModifiers("switch-content-menu__link", [item === choice ? "active" : null])} onClick={() => Choose(item)} key={"switch_link_" + index}>
<div className="switch-content-menu__text">{item}</div>
{children[index]?.props?.hint && <span className="switch-content-menu__hint">{children[index].props.hint}</span>}
</div>
))}
</div>
<div className="switch-content__container">
{children.filter(child => child.props.path === choice)}
</div>
|
export function SwitchContentRoute({ path, children }: SwitchContentRouteProps) {
return <Fragment key={path}>{children}</Fragment>
}
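// Usage (illustrative sketch; menu entries and paths are hypothetical):
//   <SwitchContent menu="Profile, Settings">
//     <SwitchContentRoute path="Profile">...</SwitchContentRoute>
//     <SwitchContentRoute path="Settings">...</SwitchContentRoute>
//   </SwitchContent>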
|
</div>
)
}
|
index.d.ts
|
// Type definitions for Pixi.js 4.5
// Project: https://github.com/pixijs/pixi.js/tree/dev
// Definitions by: clark-stevenson <https://github.com/pixijs/pixi-typescript>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.1
declare namespace PIXI {
// from CONST
const VERSION: typeof CONST.VERSION;
const PI_2: typeof CONST.PI_2;
const RAD_TO_DEG: typeof CONST.RAD_TO_DEG;
const DEG_TO_RAD: typeof CONST.DEG_TO_RAD;
const RENDERER_TYPE: typeof CONST.RENDERER_TYPE;
const BLEND_MODES: typeof CONST.BLEND_MODES;
const DRAW_MODES: typeof CONST.DRAW_MODES;
const SCALE_MODES: typeof CONST.SCALE_MODES;
const WRAP_MODES: typeof CONST.WRAP_MODES;
const TRANSFORM_MODE: typeof CONST.TRANSFORM_MODE;
const PRECISION: typeof CONST.PRECISION;
const GC_MODES: typeof CONST.GC_MODES;
const SHAPES: typeof CONST.SHAPES;
const TEXT_GRADIENT: typeof CONST.TEXT_GRADIENT;
const UPDATE_PRIORITY: typeof CONST.UPDATE_PRIORITY;
function autoDetectRenderer(width: number, height: number, options?: PIXI.RendererOptions, forceCanvas?: boolean): PIXI.WebGLRenderer | PIXI.CanvasRenderer;
function autoDetectRenderer(options?: PIXI.RendererOptions): PIXI.WebGLRenderer | PIXI.CanvasRenderer;
const loader: PIXI.loaders.Loader;
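// Example (illustrative): using the overloads above to create a renderer and
// attach its canvas; the dimensions are arbitrary.
//   const renderer = autoDetectRenderer(800, 600, { antialias: true });
//   document.body.appendChild(renderer.view);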
//////////////////////////////////////////////////////////////////////////////
///////////////////////////////SETTINGS///////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
namespace settings {
let TARGET_FPMS: number;
let MIPMAP_TEXTURES: boolean;
let RESOLUTION: number;
let FILTER_RESOLUTION: number;
let SPRITE_MAX_TEXTURES: number;
let SPRITE_BATCH_SIZE: number;
let RETINA_PREFIX: RegExp;
const RENDER_OPTIONS: {
view: HTMLCanvasElement | null,
antialias: boolean,
forceFXAA: boolean,
autoResize: boolean,
transparent: boolean,
backgroundColor: number,
clearBeforeRender: boolean,
preserveDrawingBuffer: boolean,
roundPixels: boolean,
width: number,
height: number,
legacy: boolean,
};
let TRANSFORM_MODE: number;
let GC_MODE: number;
let GC_MAX_IDLE: number;
let GC_MAX_CHECK_COUNT: number;
let WRAP_MODE: number;
let SCALE_MODE: number;
let PRECISION_VERTEX: string;
let PRECISION_FRAGMENT: string;
let PRECISION: string;
let UPLOADS_PER_FRAME: number;
let CAN_UPLOAD_SAME_BUFFER: boolean;
}
//////////////////////////////////////////////////////////////////////////////
/////////////////////////////ACCESSIBILITY////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
namespace accessibility {
// accessibility
class AccessibilityManager {
constructor(renderer: CanvasRenderer | WebGLRenderer);
activate(): void;
deactivate(): void;
protected div: HTMLElement;
protected pool: HTMLElement[];
protected renderId: number;
debug: boolean;
renderer: SystemRenderer;
protected children: AccessibleTarget[];
protected isActive: boolean;
protected updateAccessibleObjects(displayObject: DisplayObject): void;
protected update(): void;
protected capHitArea(hitArea: HitArea): void;
protected addChild(displayObject: DisplayObject): void;
protected _onClick(e: interaction.InteractionEvent): void;
protected _onFocus(e: interaction.InteractionEvent): void;
protected _onFocusOut(e: interaction.InteractionEvent): void;
protected _onKeyDown(e: interaction.InteractionEvent): void;
protected _onMouseMove(): void;
destroy(): void;
}
interface AccessibleTarget {
accessible: boolean;
accessibleTitle: string | null;
accessibleHint: string | null;
tabIndex: number;
}
}
//////////////////////////////////////////////////////////////////////////////
////////////////////////////////CORE//////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// const
namespace CONST {
const VERSION: string;
const PI_2: number;
const RAD_TO_DEG: number;
const DEG_TO_RAD: number;
const TARGET_FPMS: number;
const RENDERER_TYPE: {
UNKNOWN: number;
WEBGL: number;
CANVAS: number;
};
const BLEND_MODES: {
NORMAL: number;
ADD: number;
MULTIPLY: number;
SCREEN: number;
OVERLAY: number;
DARKEN: number;
LIGHTEN: number;
COLOR_DODGE: number;
COLOR_BURN: number;
HARD_LIGHT: number;
SOFT_LIGHT: number;
DIFFERENCE: number;
EXCLUSION: number;
HUE: number;
SATURATION: number;
COLOR: number;
LUMINOSITY: number;
NORMAL_NPM: number;
ADD_NPM: number;
SCREEN_NPM: number;
};
const DRAW_MODES: {
POINTS: number;
LINES: number;
LINE_LOOP: number;
LINE_STRIP: number;
TRIANGLES: number;
TRIANGLE_STRIP: number;
TRIANGLE_FAN: number;
};
const SCALE_MODES: {
LINEAR: number,
NEAREST: number
};
const GC_MODES: {
AUTO: number;
MANUAL: number;
};
const WRAP_MODES: {
CLAMP: number;
MIRRORED_REPEAT: number;
REPEAT: number;
};
const TRANSFORM_MODE: {
DEFAULT: number;
DYNAMIC: number;
STATIC: number;
};
const URL_FILE_EXTENSION: RegExp | string;
const DATA_URI: RegExp | string;
const SVG_SIZE: RegExp | string;
const SHAPES: {
POLY: number;
RECT: number;
CIRC: number;
ELIP: number;
RREC: number;
};
const PRECISION: {
LOW: string;
MEDIUM: string;
HIGH: string;
};
const TEXT_GRADIENT: {
LINEAR_VERTICAL: number;
LINEAR_HORIZONTAL: number;
};
const UPDATE_PRIORITY: {
INTERACTION: number;
HIGH: number;
NORMAL: number;
LOW: number;
UTILITY: number;
};
}
// display
class Application {
constructor(options?: ApplicationOptions)
constructor(width?: number, height?: number, options?: ApplicationOptions, noWebGL?: boolean, sharedTicker?: boolean, sharedLoader?: boolean);
private _ticker: ticker.Ticker;
renderer: PIXI.WebGLRenderer | PIXI.CanvasRenderer;
stage: Container;
ticker: ticker.Ticker;
readonly screen: Rectangle;
stop(): void;
start(): void;
render(): void;
destroy(removeView?: boolean): void;
readonly view: HTMLCanvasElement;
}
interface DestroyOptions {
children?: boolean;
texture?: boolean;
baseTexture?: boolean;
}
class Bounds {
minX: number;
minY: number;
maxX: number;
maxY: number;
rect: Rectangle;
isEmpty(): boolean;
clear(): void;
getRectangle(rect?: Rectangle): Rectangle;
addPoint(point: Point): void;
addQuad(vertices: number[]): Bounds | undefined;
addFrame(transform: Transform, x0: number, y0: number, x1: number, y1: number): void;
addVertices(transform: Transform, vertices: number[], beginOffset: number, endOffset: number): void;
addBounds(bounds: Bounds): void;
addBoundsMask(bounds: Bounds, mask: Bounds): void;
addBoundsArea(bounds: Bounds, area: Rectangle): void;
}
class Container extends DisplayObject {
// begin extras.getChildByName
getChildByName(name: string): DisplayObject;
// end extras.getChildByName
children: DisplayObject[];
width: number;
height: number;
protected onChildrenChange: (...args: any[]) => void;
addChild<T extends DisplayObject>(child: T, ...additionalChildren: DisplayObject[]): T;
addChildAt<T extends DisplayObject>(child: T, index: number): T;
swapChildren(child: DisplayObject, child2: DisplayObject): void;
getChildIndex(child: DisplayObject): number;
setChildIndex(child: DisplayObject, index: number): void;
getChildAt(index: number): DisplayObject;
removeChild(child: DisplayObject): DisplayObject;
removeChildAt(index: number): DisplayObject;
removeChildren(beginIndex?: number, endIndex?: number): DisplayObject[];
updateTransform(): void;
calculateBounds(): void;
protected _calculateBounds(): void;
protected containerUpdateTransform(): void;
renderWebGL(renderer: WebGLRenderer): void;
renderAdvancedWebGL(renderer: WebGLRenderer): void;
protected _renderWebGL(renderer: WebGLRenderer): void;
protected _renderCanvas(renderer: CanvasRenderer): void;
renderCanvas(renderer: CanvasRenderer): void;
destroy(options?: DestroyOptions | boolean): void;
once(event: "added" | "removed", fn: (displayObject: DisplayObject) => void, context?: any): this;
//tslint:disable-next-line:ban-types forbidden-types
once(event: string, fn: Function, context?: any): this;
on(event: "added" | "removed", fn: (displayObject: DisplayObject) => void, context?: any): this;
//tslint:disable-next-line:ban-types forbidden-types
on(event: string, fn: Function, context?: any): this;
//tslint:disable-next-line:ban-types forbidden-types
off(event: "added" | "removed" | string, fn?: Function, context?: any): this;
}
class DisplayObject extends utils.EventEmitter implements interaction.InteractiveTarget, accessibility.AccessibleTarget {
// begin extras.cacheAsBitmap
protected _cacheAsBitmap: boolean;
protected _cacheData: boolean;
cacheAsBitmap: boolean;
protected _renderCachedWebGL(renderer: WebGLRenderer): void;
protected _initCachedDisplayObject(renderer: WebGLRenderer): void;
protected _renderCachedCanvas(renderer: CanvasRenderer): void;
protected _initCachedDisplayObjectCanvas(renderer: CanvasRenderer): void;
protected _calculateCachedBounds(): Rectangle;
protected _getCachedLocalBounds(): Rectangle;
protected _destroyCachedDisplayObject(): void;
protected _cacheAsBitmapDestroy(options: boolean | any): void;
// end extras.cacheAsBitmap
// begin extras.getChildByName
name: string | null;
// end extras.getChildByName
// begin extras.getGlobalPosition
getGlobalPosition(point?: Point, skipUpdate?: boolean): Point;
// end extras.getGlobalPosition
// begin accessible target
accessible: boolean;
accessibleTitle: string | null;
accessibleHint: string | null;
tabIndex: number;
// end accessible target
// begin interactive target
interactive: boolean;
interactiveChildren: boolean;
hitArea: PIXI.Rectangle | PIXI.Circle | PIXI.Ellipse | PIXI.Polygon | PIXI.RoundedRectangle;
buttonMode: boolean;
cursor: string;
trackedPointers(): { [key: number]: interaction.InteractionTrackingData; };
// deprecated
defaultCursor: string;
// end interactive target
transform: TransformBase;
alpha: number;
visible: boolean;
renderable: boolean;
parent: Container;
worldAlpha: number;
filterArea: Rectangle;
protected _filters: Filter[] | null;
protected _enabledFilters: Filter[] | null;
protected _bounds: Bounds;
protected _boundsID: number;
protected _lastBoundsID: number;
protected _boundsRect: Rectangle;
protected _localBoundsRect: Rectangle;
protected _mask: PIXI.Graphics | PIXI.Sprite;
protected readonly _destroyed: boolean;
x: number;
y: number;
worldTransform: Matrix;
localTransform: Matrix;
position: Point | ObservablePoint;
scale: Point | ObservablePoint;
pivot: Point | ObservablePoint;
skew: ObservablePoint;
rotation: number;
worldVisible: boolean;
mask: PIXI.Graphics | PIXI.Sprite;
filters: Filter[] | null;
updateTransform(): void;
protected displayObjectUpdateTransform(): void;
protected _recursivePostUpdateTransform(): void;
getBounds(skipUpdate?: boolean, rect?: Rectangle): Rectangle;
getLocalBounds(rect?: Rectangle): Rectangle;
//creates and returns a new point
toGlobal(position: PointLike): Point;
//modifies the x and y of the passed point and returns it
toGlobal<T extends PointLike>(position: PointLike, point?: T, skipUpdate?: boolean): T;
//creates and returns a new point
toLocal(position: PointLike, from?: DisplayObject): Point;
//modifies the x and y of the passed point and returns it
toLocal<T extends PointLike>(position: PointLike, from?: DisplayObject, point?: T, skipUpdate?: boolean): T;
renderWebGL(renderer: WebGLRenderer): void;
renderCanvas(renderer: CanvasRenderer): void;
setParent(container: Container): Container;
setTransform(x?: number, y?: number, scaleX?: number, scaleY?: number, rotation?: number, skewX?: number, skewY?: number, pivotX?: number, pivotY?: number): DisplayObject;
destroy(): void;
on(event: interaction.InteractionEventTypes, fn: (event: interaction.InteractionEvent) => void, context?: any): this;
once(event: interaction.InteractionEventTypes, fn: (event: interaction.InteractionEvent) => void, context?: any): this;
removeListener(event: interaction.InteractionEventTypes, fn?: (event: interaction.InteractionEvent) => void, context?: any): this;
removeAllListeners(event: interaction.InteractionEventTypes): this;
off(event: interaction.InteractionEventTypes, fn?: (event: interaction.InteractionEvent) => void, context?: any): this;
addListener(event: interaction.InteractionEventTypes, fn: (event: interaction.InteractionEvent) => void, context?: any): this;
}
class TransformBase {
static IDENTITY: TransformBase;
worldTransform: Matrix;
localTransform: Matrix;
protected _worldID: number;
updateLocalTransform(): void;
updateTransform(parentTransform: TransformBase): void;
updateWorldTransform(parentTransform: TransformBase): void;
}
class TransformStatic extends TransformBase {
position: ObservablePoint;
scale: ObservablePoint;
pivot: ObservablePoint;
skew: ObservablePoint;
protected _rotation: number;
protected _sr?: number;
protected _cr?: number;
protected _cy?: number;
protected _sy?: number;
protected _nsx?: number;
protected _cx?: number;
protected _currentLocalID: number;
protected onChange(): void;
updateSkew(): void;
updateLocalTransform(): void;
updateTransform(parentTransform: TransformBase): void;
setFromMatrix(matrix: Matrix): void;
rotation: number;
}
class Transform extends TransformBase {
constructor();
position: Point;
scale: Point;
skew: ObservablePoint;
pivot: Point;
protected _rotation: number;
protected _sr?: number;
protected _cr?: number;
protected _cy?: number;
protected _sy?: number;
protected _nsx?: number;
protected _cx?: number;
updateSkew(): void;
setFromMatrix(matrix: Matrix): void;
rotation: number;
}
// graphics
class GraphicsData {
constructor(
lineWidth: number,
lineColor: number,
lineAlpha: number,
fillColor: number,
fillAlpha: number,
fill: boolean,
nativeLines: boolean,
shape: Circle | Rectangle | Ellipse | Polygon | RoundedRectangle | any);
lineWidth: number;
nativeLines: boolean;
lineColor: number;
lineAlpha: number;
protected _lineTint: number;
fillColor: number;
fillAlpha: number;
protected _fillTint: number;
fill: boolean;
protected holes: Circle[] | Rectangle[] | Ellipse[] | Polygon[] | RoundedRectangle[] | any[];
shape: Circle | Rectangle | Ellipse | Polygon | RoundedRectangle | any;
type?: number;
clone(): GraphicsData;
addHole(shape: Circle | Rectangle | Ellipse | Polygon | RoundedRectangle | any): void;
destroy(options?: DestroyOptions | boolean): void;
}
class Graphics extends Container {
constructor(nativeLines?: boolean);
fillAlpha: number;
lineWidth: number;
nativeLines: boolean;
lineColor: number;
protected graphicsData: GraphicsData[];
tint: number;
protected _prevTint: number;
blendMode: number;
currentPath: GraphicsData;
protected _webGL: any;
isMask: boolean;
boundsPadding: number;
protected _localBounds: Bounds;
dirty: number;
fastRectDirty: number;
clearDirty: number;
boundsDirty: number;
protected cachedSpriteDirty: boolean;
protected _spriteRect: Rectangle;
protected _fastRect: boolean;
static _SPRITE_TEXTURE: Texture;
clone(): Graphics;
lineStyle(lineWidth?: number, color?: number, alpha?: number): Graphics;
moveTo(x: number, y: number): Graphics;
lineTo(x: number, y: number): Graphics;
quadraticCurveTo(cpX: number, cpY: number, toX: number, toY: number): Graphics;
bezierCurveTo(cpX: number, cpY: number, cpX2: number, cpY2: number, toX: number, toY: number): Graphics;
arcTo(x1: number, y1: number, x2: number, y2: number, radius: number): Graphics;
arc(cx: number, cy: number, radius: number, startAngle: number, endAngle: number, anticlockwise?: boolean): Graphics;
beginFill(color: number, alpha?: number): Graphics;
endFill(): Graphics;
drawRect(x: number, y: number, width: number, height: number): Graphics;
drawRoundedRect(x: number, y: number, width: number, height: number, radius: number): Graphics;
drawCircle(x: number, y: number, radius: number): Graphics;
drawEllipse(x: number, y: number, width: number, height: number): Graphics;
drawPolygon(path: number[] | Point[]): Graphics;
clear(): Graphics;
isFastRect(): boolean;
protected _renderCanvas(renderer: CanvasRenderer): void;
protected _calculateBounds(): Rectangle;
protected _renderSpriteRect(renderer: PIXI.SystemRenderer): void;
containsPoint(point: Point): boolean;
updateLocalBounds(): void;
drawShape(shape: Circle | Rectangle | Ellipse | Polygon | RoundedRectangle | any): GraphicsData;
generateCanvasTexture(scaleMode?: number, resolution?: number): Texture;
protected closePath(): Graphics;
protected addHole(): Graphics;
destroy(options?: DestroyOptions | boolean): void;
}
class CanvasGraphicsRenderer {
constructor(renderer: SystemRenderer);
render(graphics: Graphics): void;
protected updateGraphicsTint(graphics: Graphics): void;
protected renderPolygon(points: Point[], close: boolean, context: CanvasRenderingContext2D): void;
destroy(): void;
}
class GraphicsRenderer extends ObjectRenderer {
constructor(renderer: PIXI.CanvasRenderer);
protected graphicsDataPool: GraphicsData[];
protected primitiveShader: PrimitiveShader;
gl: WebGLRenderingContext;
CONTEXT_UID: number;
destroy(): void;
render(graphics: Graphics): void;
protected updateGraphics(graphics: PIXI.Graphics): void;
getWebGLData(webGL: WebGLRenderingContext, type: number, nativeLines: number): WebGLGraphicsData;
}
class WebGLGraphicsData {
constructor(gl: WebGLRenderingContext, shader: glCore.GLShader, attribsState: glCore.AttribState);
gl: WebGLRenderingContext;
color: number[];
points: Point[];
indices: number[];
buffer: WebGLBuffer;
indexBuffer: WebGLBuffer;
dirty: boolean;
glPoints: number[];
glIndices: number[];
shader: glCore.GLShader;
vao: glCore.VertexArrayObject;
nativeLines: boolean;
reset(): void;
upload(): void;
destroy(): void;
}
class PrimitiveShader extends glCore.GLShader { }
// math
namespace GroupD8 {
const E: number;
const SE: number;
const S: number;
const SW: number;
const W: number;
const NW: number;
const N: number;
const NE: number;
const MIRROR_HORIZONTAL: number;
const MIRROR_VERTICAL: number;
function uX(ind: number): number;
function uY(ind: number): number;
function vX(ind: number): number;
function vY(ind: number): number;
function inv(rotation: number): number;
function add(rotationSecond: number, rotationFirst: number): number;
function sub(rotationSecond: number, rotationFirst: number): number;
function rotate180(rotation: number): number;
function isSwapWidthHeight(rotation: number): boolean;
function byDirection(dx: number, dy: number): number;
function matrixAppendRotationInv(matrix: Matrix, rotation: number, tx: number, ty: number): void;
}
class Matrix {
constructor(a?: number, b?: number, c?: number, d?: number, tx?: number, ty?: number);
a: number;
b: number;
c: number;
d: number;
tx: number;
ty: number;
fromArray(array: number[]): void;
set(a: number, b: number, c: number, d: number, tx: number, ty: number): Matrix;
toArray(transpose?: boolean, out?: number[]): number[];
apply(pos: Point, newPos?: Point): Point;
applyInverse(pos: Point, newPos?: Point): Point;
translate(x: number, y: number): Matrix;
scale(x: number, y: number): Matrix;
rotate(angle: number): Matrix;
append(matrix: Matrix): Matrix;
setTransform(x: number, y: number, pivotX: number, pivotY: number, scaleX: number, scaleY: number, rotation: number, skewX: number, skewY: number): PIXI.Matrix;
prepend(matrix: Matrix): Matrix;
invert(): Matrix;
identity(): Matrix;
decompose(transform: TransformBase): TransformBase;
clone(): Matrix;
copy(matrix: Matrix): Matrix;
static IDENTITY: Matrix;
static TEMP_MATRIX: Matrix;
}
class PointLike {
x: number;
y: number;
set(x?: number, y?: number): void;
copy(point: PointLike): void;
}
class ObservablePoint extends PointLike {
constructor(cb: () => any, scope?: any, x?: number, y?: number);
cb: () => any;
scope: any;
}
class Point extends PointLike {
constructor(x?: number, y?: number);
clone(): Point;
equals(p: PointLike): boolean;
}
interface HitArea {
contains(x: number, y: number): boolean;
}
class Circle {
constructor(x?: number, y?: number, radius?: number);
x: number;
y: number;
radius: number;
type: number;
clone(): Circle;
contains(x: number, y: number): boolean;
getBounds(): Rectangle;
}
class Ellipse {
constructor(x?: number, y?: number, width?: number, height?: number);
x: number;
y: number;
width: number;
height: number;
type: number;
clone(): Ellipse;
contains(x: number, y: number): boolean;
getBounds(): Rectangle;
}
class Polygon {
constructor(points: Point[] | number[]);
// Note - Rest Params cannot be combined with |
//tslint:disable-next-line:unified-signatures
constructor(...points: Point[]);
//tslint:disable-next-line:unified-signatures
constructor(...points: number[]);
closed: boolean;
points: number[];
type: number;
clone(): Polygon;
contains(x: number, y: number): boolean;
close(): void;
}
class Rectangle {
constructor(x?: number, y?: number, width?: number, height?: number);
x: number;
y: number;
width: number;
height: number;
type: number;
left: number;
right: number;
top: number;
bottom: number;
static EMPTY: Rectangle;
clone(): Rectangle;
copy(rectangle: Rectangle): Rectangle;
contains(x: number, y: number): boolean;
pad(paddingX: number, paddingY: number): void;
fit(rectangle: Rectangle): void;
enlarge(rectangle: Rectangle): void;
}
class RoundedRectangle {
constructor(x?: number, y?: number, width?: number, height?: number, radius?: number);
x: number;
y: number;
width: number;
height: number;
radius: number;
type: number;
clone(): RoundedRectangle;
contains(x: number, y: number): boolean;
}
// renderers
interface RendererOptions {
/**
* the width of the renderers view [default=800]
*/
width?: number;
/**
* the height of the renderers view [default=600]
*/
height?: number;
/**
* the canvas to use as a view, optional
*/
view?: HTMLCanvasElement;
/**
* If the render view is transparent, [default=false]
*/
transparent?: boolean;
/**
* sets antialias (only applicable in chrome at the moment) [default=false]
*/
antialias?: boolean;
/**
* enables drawing buffer preservation, enable this if you need to call toDataUrl on the webgl context [default=false]
*/
preserveDrawingBuffer?: boolean;
/**
* The resolution / device pixel ratio of the renderer, retina would be 2 [default=1]
*/
resolution?: number;
/**
* prevents selection of WebGL renderer, even if such is present [default=false]
*/
forceCanvas?: boolean;
/**
* The background color of the rendered area (shown if not transparent) [default=0x000000]
*/
backgroundColor?: number;
/**
* This sets if the renderer will clear the canvas or not before the new render pass. [default=true]
*/
clearBeforeRender?: boolean;
/**
* If true Pixi will Math.floor() x/ y values when rendering, stopping pixel interpolation. [default=false]
*/
roundPixels?: boolean;
/**
* forces FXAA antialiasing to be used over native FXAA is faster, but may not always look as great ** webgl only** [default=false]
*/
forceFXAA?: boolean;
/**
* `true` to ensure compatibility with older / less advanced devices. If you experience unexplained flickering try setting this to true. **webgl only** [default=false]
*/
legacy?: boolean;
/**
* Deprecated
*/
context?: WebGLRenderingContext;
/**
* Deprecated
*/
autoResize?: boolean;
}
interface ApplicationOptions extends RendererOptions {
/**
* `true` to use PIXI.ticker.shared, `false` to create new ticker. [default=false]
*/
sharedTicker?: boolean;
/**
* `true` to use PIXI.loaders.shared, `false` to create new Loader.
*/
sharedLoader?: boolean;
}
class SystemRenderer extends utils.EventEmitter {
constructor(system: string, options?: RendererOptions);
constructor(system: string, screenWidth?: number, screenHeight?: number, options?: RendererOptions);
type: number;
options: RendererOptions;
screen: Rectangle;
readonly width: number;
readonly height: number;
view: HTMLCanvasElement;
resolution: number;
transparent: boolean;
autoResize: boolean;
blendModes: any; // todo?
preserveDrawingBuffer: boolean;
clearBeforeRender: boolean;
roundPixels: boolean;
protected _backgroundColor: number;
protected _backgroundColorRgba: number[];
protected _backgroundColorString: string;
protected _tempDisplayObjectParent: Container;
protected _lastObjectRendered: DisplayObject;
resize(screenWidth: number, screenHeight: number): void;
generateTexture(displayObject: DisplayObject, scaleMode?: number, resolution?: number): RenderTexture;
render(...args: any[]): void;
destroy(removeView?: boolean): void;
}
class CanvasRenderer extends SystemRenderer {
// plugintarget mixin start
static __plugins: any;
//tslint:disable-next-line:ban-types forbidden-types
static registerPlugin(pluginName: string, ctor: Function): void;
plugins: any;
initPlugins(): void;
destroyPlugins(): void;
// plugintarget mixin end
// from InteractionManager
interaction?: interaction.InteractionManager;
constructor(options?: RendererOptions);
constructor(screenWidth?: number, screenHeight?: number, options?: RendererOptions);
rootContext: CanvasRenderingContext2D;
rootResolution?: number;
refresh: boolean;
maskManager: CanvasMaskManager;
smoothProperty: string;
extract: extract.CanvasExtract;
context: CanvasRenderingContext2D | null;
render(displayObject: PIXI.DisplayObject, renderTexture?: PIXI.RenderTexture, clear?: boolean, transform?: PIXI.Transform, skipUpdateTransform?: boolean): void;
setBlendMode(blendMode: number): void;
destroy(removeView?: boolean): void;
clear(clearColor?: string): void;
on(event: "prerender" | "postrender", fn: () => void, context?: any): this;
once(event: "prerender" | "postrender", fn: () => void, context?: any): this;
removeListener(event: "prerender" | "postrender", fn?: () => void, context?: any): this;
removeAllListeners(event: "prerender" | "postrender"): this;
off(event: "prerender" | "postrender", fn?: () => void, context?: any): this;
addListener(event: "prerender" | "postrender", fn: () => void, context?: any): this;
}
class CanvasMaskManager {
constructor(renderer: CanvasRenderer);
pushMask(maskData: any): void;
protected renderGraphicsShape(graphics: Graphics): void;
popMask(renderer: WebGLRenderer | CanvasRenderer): void;
destroy(): void;
}
class CanvasRenderTarget {
constructor(width: number, height: number, resolution: number);
canvas: HTMLCanvasElement;
context: CanvasRenderingContext2D;
resolution: number;
width: number;
height: number;
clear(): void;
resize(width: number, height: number): void;
destroy(): void;
}
interface WebGLRendererOptions extends RendererOptions {
}
class WebGLRenderer extends SystemRenderer {
// plugintarget mixin start
static __plugins: any;
//tslint:disable-next-line:ban-types forbidden-types
static registerPlugin(pluginName: string, ctor: Function): void;
plugins: any;
initPlugins(): void;
destroyPlugins(): void;
// plugintarget mixin end
// from InteractionManager
interaction: interaction.InteractionManager;
constructor(options?: WebGLRendererOptions);
constructor(screenWidth?: number, screenHeight?: number, options?: WebGLRendererOptions);
protected _contextOptions: {
alpha: boolean;
antiAlias?: boolean;
premultipliedAlpha: boolean;
stencil: boolean;
preserveDrawingBuffer?: boolean;
};
protected _backgroundColorRgba: number[];
maskManager: MaskManager;
stencilManager?: StencilManager;
emptyRenderer: ObjectRenderer;
currentRenderer: ObjectRenderer;
gl: WebGLRenderingContext;
CONTEXT_UID: number;
state?: WebGLState;
renderingToScreen: boolean;
boundTextures: Texture[];
filterManager: FilterManager;
textureManager?: TextureManager;
textureGC?: TextureGarbageCollector;
extract: extract.WebGLExtract;
protected drawModes: any;
protected _activeShader: Shader;
_activeRenderTarget: RenderTarget;
protected _initContext(): void;
render(displayObject: PIXI.DisplayObject, renderTexture?: PIXI.RenderTexture, clear?: boolean, transform?: PIXI.Transform, skipUpdateTransform?: boolean): void;
setObjectRenderer(objectRenderer: ObjectRenderer): void;
flush(): void;
setBlendMode(blendMode: number): void;
clear(clearColor?: number): void;
setTransform(matrix: Matrix): void;
clearRenderTexture(renderTexture: RenderTexture, clearColor?: number): WebGLRenderer;
bindRenderTexture(renderTexture: RenderTexture, transform: Transform): WebGLRenderer;
bindRenderTarget(renderTarget: RenderTarget): WebGLRenderer;
bindShader(shader: Shader, autoProject?: boolean): WebGLRenderer;
bindTexture(texture: Texture | BaseTexture, location?: number, forceLocation?: boolean): number;
unbindTexture(texture: Texture | BaseTexture): WebGLRenderer | undefined;
createVao(): glCore.VertexArrayObject;
bindVao(vao: glCore.VertexArrayObject): WebGLRenderer;
reset(): WebGLRenderer;
handleContextLost: (event: WebGLContextEvent) => void;
handleContextRestored: () => void;
destroy(removeView?: boolean): void;
on(event: "prerender" | "postrender", fn: () => void, context?: any): this;
on(event: "context", fn: (gl: WebGLRenderingContext) => void, context?: any): this;
once(event: "prerender" | "postrender", fn: () => void, context?: any): this;
once(event: "context", fn: (gl: WebGLRenderingContext) => void, context?: any): this;
removeListener(event: "prerender" | "postrender", fn?: () => void, context?: any): this;
removeListener(event: "context", fn?: (gl: WebGLRenderingContext) => void, context?: any): this;
removeAllListeners(event: "prerender" | "postrender" | "context"): this;
off(event: "prerender" | "postrender", fn?: () => void, context?: any): this;
off(event: "context", fn?: (gl: WebGLRenderingContext) => void, context?: any): this;
addListener(event: "prerender" | "postrender", fn: () => void, context?: any): this;
addListener(event: "context", fn: (gl: WebGLRenderingContext) => void, context?: any): this;
}
class WebGLState {
constructor(gl: WebGLRenderingContext);
activeState: number[];
defaultState: number[];
stackIndex: number;
stack: number[];
gl: WebGLRenderingContext;
maxAttribs: number;
attribState: glCore.AttribState;
nativeVaoExtension: any;
push(): void;
pop(): void;
setState(state: number[]): void;
setBlend(value: number): void;
setBlendMode(value: number): void;
setDepthTest(value: number): void;
setCullFace(value: number): void;
setFrontFace(value: number): void;
resetAttributes(): void;
resetToDefault(): void;
}
class TextureManager {
constructor(renderer: WebGLRenderer);
renderer: WebGLRenderer;
gl: WebGLRenderingContext;
protected _managedTextures: Texture[];
bindTexture(): void;
getTexture(): WebGLTexture;
updateTexture(texture: BaseTexture | Texture): WebGLTexture;
destroyTexture(texture: BaseTexture, _skipRemove?: boolean): void;
removeAll(): void;
destroy(): void;
}
class TextureGarbageCollector {
constructor(renderer: WebGLRenderer);
renderer: WebGLRenderer;
count: number;
checkCount: number;
maxIdle: number;
checkCountMax: number;
mode: number;
update(): void;
run(): void;
unload(): void;
}
abstract class ObjectRenderer extends WebGLManager {
constructor(renderer: WebGLRenderer);
start(): void;
stop(): void;
flush(): void;
render(...args: any[]): void;
}
class Quad {
constructor(gl: WebGLRenderingContext);
gl: WebGLRenderingContext;
vertices: number[];
uvs: number[];
interleaved: number[];
indices: number[];
vertexBuffer: WebGLBuffer;
vao: glCore.VertexArrayObject;
initVao(shader: glCore.GLShader): void;
map(targetTextureFrame: Rectangle, destinationFrame: Rectangle): Quad;
upload(): Quad;
destroy(): void;
}
interface FilterDataStackItem {
renderTarget: RenderTarget;
filter: any[];
bounds: Rectangle;
}
class RenderTarget {
constructor(gl: WebGLRenderingContext, width: number, height: number, scaleMode: number, resolution: number, root?: boolean);
gl: WebGLRenderingContext;
frameBuffer: glCore.GLFramebuffer;
texture: Texture;
clearColor: number[];
size: Rectangle;
resolution: number;
projectionMatrix: Matrix;
transform: Matrix;
frame: Rectangle;
defaultFrame: Rectangle;
destinationFrame: Rectangle;
sourceFrame?: Rectangle;
stencilBuffer: glCore.GLFramebuffer;
stencilMaskStack: Graphics[];
filterData: {
index: number,
stack: FilterDataStackItem[];
};
scaleMode: number;
root: boolean;
clear(clearColor?: number[]): void;
attachStencilBuffer(): void;
setFrame(destinationFrame: Rectangle, sourceFrame: Rectangle): void;
activate(): void;
calculateProjection(destinationFrame: Rectangle, sourceFrame: Rectangle): void;
resize(width: number, height: number): void;
destroy(): void;
}
class BlendModeManager extends WebGLManager {
constructor(renderer: WebGLRenderer);
currentBlendMode: number;
setBlendMode(blendMode: number): boolean;
}
interface FilterManagerStackItem {
renderTarget: RenderTarget;
sourceFrame: Rectangle;
destinationFrame: Rectangle;
filters: Filter[];
target: any;
resolution: number;
}
class FilterManager extends WebGLManager {
constructor(renderer: WebGLRenderer);
gl: WebGLRenderingContext;
quad: Quad;
stack: FilterManagerStackItem[];
stackIndex: number;
shaderCache: any;
filterData: any;
pushFilter(target: RenderTarget, filters: Filter[]): void;
popFilter(): void;
applyFilter(shader: glCore.GLShader | Filter, inputTarget: RenderTarget, outputTarget: RenderTarget, clear?: boolean): void;
syncUniforms(shader: glCore.GLShader, filter: Filter): void;
getRenderTarget(clear?: boolean, resolution?: number): RenderTarget;
returnRenderTarget(renderTarget: RenderTarget): RenderTarget;
calculateScreenSpaceMatrix(outputMatrix: Matrix): Matrix;
calculateNormalizedScreenSpaceMatrix(outputMatrix: Matrix): Matrix;
calculateSpriteMatrix(outputMatrix: Matrix, sprite: Sprite): Matrix;
destroy(): void;
emptyPool(): void;
getPotRenderTarget(gl: WebGLRenderingContext, minWidth: number, minHeight: number, resolution: number): RenderTarget;
freePotRenderTarget(renderTarget: RenderTarget): void;
}
class StencilMaskStack {
stencilStack: any[];
reverse: boolean;
count: number;
}
class MaskManager extends WebGLManager {
scissor: boolean;
scissorData: any;
scissorRenderTarget: RenderTarget;
enableScissor: boolean;
alphaMaskPool: number[];
alphaMaskIndex: number;
pushMask(target: RenderTarget, maskData: Sprite | Graphics): void;
popMask(target: RenderTarget, maskData: Sprite | Graphics): void;
pushSpriteMask(target: RenderTarget, maskData: Sprite | Graphics): void;
popSpriteMask(): void;
pushStencilMask(maskData: Sprite | Graphics): void;
popStencilMask(): void;
pushScissorMask(target: RenderTarget, maskData: Sprite | Graphics): void;
popScissorMask(): void;
}
class StencilManager extends WebGLManager {
constructor(renderer: WebGLRenderer);
stencilMaskStack: Graphics[];
setMaskStack(stencilMasStack: Graphics[]): void;
pushStencil(graphics: Graphics): void;
popStencil(): void;
destroy(): void;
}
class WebGLManager {
constructor(renderer: WebGLRenderer);
renderer: SystemRenderer;
onContextChange(): void;
destroy(): void;
}
interface UniformData {
type: string;
value: any;
// name is set by pixi if uniforms were automatically extracted from shader code, but not used anywhere
name?: string;
}
class Filter {
constructor(vertexSrc?: string, fragmentSrc?: string, uniforms?: { [name: string]: UniformData });
vertextSrc?: string;
fragmentSrc: string;
blendMode: number;
protected uniformData: { [name: string]: UniformData };
uniforms: { [name: string]: any } | any;
glShaders: any;
glShaderKey?: number;
padding: number;
resolution: number;
enabled: boolean;
autoFit: boolean;
apply(filterManager: FilterManager, input: RenderTarget, output: RenderTarget, clear?: boolean, currentState?: any): void;
static defaultVertexSrc: string;
static defaultFragmentSrc: string;
}
class SpriteMaskFilter extends Filter {
constructor(sprite: Sprite);
maskSprite: Sprite;
maskMatrix: Matrix;
apply(filterManager: FilterManager, input: RenderTarget, output: RenderTarget): void;
}
// sprites
class Sprite extends Container {
constructor(texture?: Texture);
protected _anchor: ObservablePoint;
anchor: ObservablePoint;
protected _texture: Texture;
protected _transformTrimmedID: number;
protected _textureTrimmedID: number;
protected _width: number;
protected _height: number;
tint: number;
protected _tint: number;
protected _tintRGB: number;
blendMode: number;
pluginName: string;
protected cachedTint: number;
texture: Texture;
protected textureDirty: boolean;
protected _textureID: number;
protected _transformID: number;
protected vertexTrimmedData: Float32Array;
vertexData: Float32Array;
width: number;
height: number;
protected _onTextureUpdate(): void;
calculateVertices(): void;
protected _calculateBounds(): void;
protected calculateTrimmedVertices(): void;
protected onAnchorUpdate(): void;
protected _renderWebGL(renderer: WebGLRenderer): void;
protected _renderCanvas(renderer: CanvasRenderer): void;
getLocalBounds(): Rectangle;
containsPoint(point: Point): boolean;
destroy(options?: DestroyOptions | boolean): void;
static from(source: number | string | BaseTexture | HTMLImageElement | HTMLCanvasElement | HTMLVideoElement): Sprite;
static fromFrame(frameId: string): Sprite;
static fromImage(imageId: string, crossorigin?: boolean, scaleMode?: number): Sprite;
}
class BatchBuffer {
vertices: ArrayBuffer;
float32View: number[];
uint32View: number[];
destroy(): void;
}
class SpriteRenderer extends ObjectRenderer {
constructor(renderer: PIXI.WebGLRenderer);
vertSize: number;
vertByteSize: number;
size: number;
buffers: BatchBuffer[];
indices: number[];
shaders: Shader[];
currentIndex: number;
tick: number;
groups: any[];
sprites: Sprite[];
vertexBuffers: number[];
vaos: glCore.VertexArrayObject[];
vaoMax: number;
vertexCount: number;
protected onContextChanged: () => void;
protected onPrerender: () => void;
render(sprite: Sprite): void;
flush(): void;
start(): void;
stop(): void;
destroy(): void;
}
class CanvasSpriteRenderer extends ObjectRenderer {
constructor(renderer: WebGLRenderer);
render(sprite: Sprite): void;
destroy(): void;
}
namespace CanvasTinter {
function getTintedTexture(sprite: Sprite, color: number): HTMLCanvasElement;
function tintWithMultiply(texture: Texture, color: number, canvas: HTMLCanvasElement): void;
function tintWithOverlay(texture: Texture, color: number, canvas: HTMLCanvasElement): void;
function tintWithPerPixel(texture: Texture, color: number, canvas: HTMLCanvasElement): void;
function roundColor(color: number): number;
let cacheStepsPerColorChannel: number;
let convertTintToImage: boolean;
let canUseMultiply: boolean;
let tintMethod: number;
}
// text
interface TextStyleOptions {
align?: string;
breakWords?: boolean;
dropShadow?: boolean;
dropShadowAlpha?: number;
dropShadowAngle?: number;
dropShadowBlur?: number;
dropShadowColor?: string | number;
dropShadowDistance?: number;
fill?: string | string[] | number | number[] | CanvasGradient | CanvasPattern;
fillGradientType?: number;
fillGradientStops?: number[];
fontFamily?: string | string[];
fontSize?: number | string;
fontStyle?: string;
fontVariant?: string;
fontWeight?: string;
letterSpacing?: number;
lineHeight?: number;
lineJoin?: string;
miterLimit?: number;
padding?: number;
stroke?: string | number;
strokeThickness?: number;
textBaseline?: string;
trim?: boolean;
wordWrap?: boolean;
wordWrapWidth?: number;
}
class TextStyle implements TextStyleOptions {
constructor(style: TextStyleOptions)
styleID: number;
clone(): TextStyle;
reset(): void;
protected _align: string;
align: string;
protected _breakWords: boolean;
breakWords: boolean;
protected _dropShadow: boolean;
dropShadow: boolean;
protected _dropShadowAlpha: number;
dropShadowAlpha: number;
protected _dropShadowAngle: number;
dropShadowAngle: number;
protected _dropShadowBlur: number;
dropShadowBlur: number;
protected _dropShadowColor: string | number;
dropShadowColor: string | number;
protected _dropShadowDistance: number;
dropShadowDistance: number;
protected _fill: string | string[] | number | number[] | CanvasGradient | CanvasPattern;
fill: string | string[] | number | number[] | CanvasGradient | CanvasPattern;
protected _fillGradientType: number;
fillGradientType: number;
protected _fillGradientStops: number[];
fillGradientStops: number[];
protected _fontFamily: string | string[];
fontFamily: string | string[];
protected _fontSize: number | string;
fontSize: number | string;
protected _fontStyle: string;
fontStyle: string;
protected _fontVariant: string;
fontVariant: string;
protected _fontWeight: string;
fontWeight: string;
protected _letterSpacing: number;
letterSpacing: number;
protected _lineHeight: number;
lineHeight: number;
protected _lineJoin: string;
lineJoin: string;
protected _miterLimit: number;
miterLimit: number;
protected _padding: number;
padding: number;
protected _stroke: string | number;
stroke: string | number;
protected _strokeThickness: number;
strokeThickness: number;
protected _textBaseline: string;
textBaseline: string;
protected _trim: boolean;
trim: boolean;
protected _wordWrap: boolean;
wordWrap: boolean;
protected _wordWrapWidth: number;
wordWrapWidth: number;
toFontString(): string;
}
class TextMetrics {
protected _canvas: HTMLCanvasElement;
protected _context: CanvasRenderingContext2D;
protected _fonts: FontMetrics;
text: string;
style: TextStyle;
width: number;
height: number;
lines: string[];
lineWidths: number[];
lineHeight: number;
maxLineWidth: number;
fontProperties: any;
constructor(text: string, style: TextStyle, width: number, height: number, lines: string[], lineWidths: number[], lineHeight: number, maxLineWidth: number, fontProperties: any);
static measureText(text: string, style: TextStyle, wordWrap?: boolean, canvas?: HTMLCanvasElement): TextMetrics;
static wordWrap(text: string, style: TextStyle, canvas?: HTMLCanvasElement): string;
static measureFont(font: string): FontMetrics;
}
interface FontMetrics {
ascent: number;
descent: number;
fontSize: number;
}
class Text extends Sprite {
constructor(text?: string, style?: TextStyleOptions, canvas?: HTMLCanvasElement);
canvas: HTMLCanvasElement;
context: CanvasRenderingContext2D;
resolution: number;
protected _text: string;
protected _style: TextStyle;
//tslint:disable-next-line:ban-types forbidden-types
protected _styleListener: Function;
protected _font: string;
protected localStyleID: number;
width: number;
height: number;
style: TextStyle;
text: string;
protected updateText(respectDirty?: boolean): void;
protected drawLetterSpacing(text: string, x: number, y: number, isStroke?: boolean): void;
protected updateTexture(): void;
renderWebGL(renderer: WebGLRenderer): void;
protected _renderCanvas(renderer: CanvasRenderer): void;
getLocalBounds(rect?: Rectangle): Rectangle;
protected _calculateBounds(): void;
protected _onStyleChange: () => void;
protected _generateFillStyle(style: TextStyle, lines: string[]): string | number | CanvasGradient;
destroy(options?: DestroyOptions | boolean): void;
dirty: boolean;
}
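// Usage sketch (illustrative only, not part of the declarations): building a
// Text object from the TextStyleOptions above; the concrete style values are
// invented examples.
//
//   const label = new PIXI.Text('Hello Pixi', {
//       fontFamily: 'Arial',
//       fontSize: 24,
//       fill: 0xff1010,
//       align: 'center',
//   });
//   label.text = 'Updated'; // reassigning text marks the object dirty for redraw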
// textures
class BaseRenderTexture extends BaseTexture {
constructor(width?: number, height?: number, scaleMode?: number, resolution?: number);
height: number;
width: number;
realHeight: number;
realWidth: number;
resolution: number;
scaleMode: number;
hasLoaded: boolean;
protected _glRenderTargets: { [n: number]: WebGLTexture; };
protected _canvasRenderTarget: { [n: number]: WebGLTexture; };
valid: boolean;
resize(width: number, height: number): void;
destroy(): void;
on(event: "update", fn: (baseRenderTexture: BaseRenderTexture) => void, context?: any): this;
once(event: "update", fn: (baseRenderTexture: BaseRenderTexture) => void, context?: any): this;
removeListener(event: "update", fn?: (baseRenderTexture: BaseRenderTexture) => void, context?: any): this;
removeAllListeners(event: "update"): this;
off(event: "update", fn?: (baseRenderTexture: BaseRenderTexture) => void, context?: any): this;
addListener(event: "update", fn: (baseRenderTexture: BaseRenderTexture) => void, context?: any): this;
}
class BaseTexture extends utils.EventEmitter {
static from(source: string | HTMLImageElement | HTMLCanvasElement, scaleMode?: number, sourceScale?: number): BaseTexture;
constructor(source?: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement, scaleMode?: number, resolution?: number);
protected uuid?: number;
protected touched: number;
resolution: number;
width: number;
height: number;
realWidth: number;
realHeight: number;
scaleMode: number;
hasLoaded: boolean;
isLoading: boolean;
wrapMode: number;
source: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | null;
origSource: HTMLImageElement | null;
imageType: string | null;
sourceScale: number;
premultipliedAlpha: boolean;
imageUrl: string | null;
protected isPowerOfTwo: boolean;
mipmap: boolean;
wrap?: boolean;
protected _glTextures: any;
protected _enabled: number;
protected _id?: number;
protected _virtualBoundId: number;
protected readonly _destroyed: boolean;
textureCacheIds: string[];
update(): void;
protected _updateDimensions(): void;
protected _updateImageType(): void;
protected _loadSvgSource(): void;
protected _loadSvgSourceUsingDataUri(dataUri: string): void;
protected _loadSvgSourceUsingXhr(): void;
protected _loadSvgSourceUsingString(svgString: string): void;
protected loadSource(source: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement): void;
protected _sourceLoaded(): void;
destroy(): void;
dispose(): void;
updateSourceImage(newSrc: string): void;
static fromImage(imageUrl: string, crossorigin?: boolean, scaleMode?: number, sourceScale?: number): BaseTexture;
static fromCanvas(canvas: HTMLCanvasElement, scaleMode?: number, origin?: string): BaseTexture;
static addToCache(baseTexture: BaseTexture, id: string): void;
static removeFromCache(baseTexture: string | BaseTexture): BaseTexture;
on(event: "update" | "loaded" | "error" | "dispose", fn: (baseTexture: BaseTexture) => void, context?: any): this;
once(event: "update" | "loaded" | "error" | "dispose", fn: (baseTexture: BaseTexture) => void, context?: any): this;
removeListener(event: "update" | "loaded" | "error" | "dispose", fn?: (baseTexture: BaseTexture) => void, context?: any): this;
removeAllListeners(event: "update" | "loaded" | "error" | "dispose"): this;
off(event: "update" | "loaded" | "error" | "dispose", fn?: (baseTexture: BaseTexture) => void, context?: any): this;
addListener(event: "update" | "loaded" | "error" | "dispose", fn: (baseTexture: BaseTexture) => void, context?: any): this;
}
class RenderTexture extends Texture {
constructor(baseRenderTexture: BaseRenderTexture, frame?: Rectangle);
protected legacyRenderer: any;
valid: boolean;
resize(width: number, height: number, doNotResizeBaseTexture?: boolean): void;
static create(width?: number, height?: number, scaleMode?: number, resolution?: number): RenderTexture;
}
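// Usage sketch (illustrative only): rendering into an offscreen RenderTexture;
// assumes a `renderer` and `sprite` created elsewhere.
//
//   const rt = PIXI.RenderTexture.create(256, 256);
//   renderer.render(sprite, rt);           // draw the sprite into the texture
//   const snapshot = new PIXI.Sprite(rt);  // reuse it like any other texture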
class Texture extends utils.EventEmitter {
constructor(baseTexture: BaseTexture, frame?: Rectangle, orig?: Rectangle, trim?: Rectangle, rotate?: number);
noFrame: boolean;
baseTexture: BaseTexture;
protected _frame: Rectangle;
trim?: Rectangle;
valid: boolean;
requiresUpdate: boolean;
protected _uvs: TextureUvs;
orig: Rectangle;
protected _updateID: number;
transform: any;
textureCacheIds: string[];
update(): void;
protected onBaseTextureLoaded(baseTexture: BaseTexture): void;
protected onBaseTextureUpdated(baseTexture: BaseTexture): void;
destroy(destroyBase?: boolean): void;
clone(): Texture;
protected _updateUvs(): void;
static fromImage(imageUrl: string, crossOrigin?: boolean, scaleMode?: number, sourceScale?: number): Texture;
static fromFrame(frameId: string): Texture;
static fromCanvas(canvas: HTMLCanvasElement, scaleMode?: number, origin?: string): Texture;
static fromVideo(video: HTMLVideoElement | string, scaleMode?: number): Texture;
static fromVideoUrl(videoUrl: string, scaleMode?: number): Texture;
static from(source: number | string | HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | BaseTexture): Texture;
static fromLoader(source: HTMLImageElement | HTMLCanvasElement, imageUrl: string, name?: string): Texture;
static addToCache(texture: Texture, id: string): void;
static removeFromCache(texture: string | Texture): Texture;
// deprecation
static addTextureToCache(texture: Texture, id: string): void;
static removeTextureFromCache(id: string): Texture;
frame: Rectangle;
protected _rotate: boolean | 0;
rotate: number;
width: number;
height: number;
static EMPTY: Texture;
static WHITE: Texture;
on(event: "update", fn: (texture: Texture) => void, context?: any): this;
once(event: "update", fn: (texture: Texture) => void, context?: any): this;
removeListener(event: "update", fn?: (texture: Texture) => void, context?: any): this;
removeAllListeners(event: "update"): this;
off(event: "update", fn?: (texture: Texture) => void, context?: any): this;
addListener(event: "update", fn: (texture: Texture) => void, context?: any): this;
}
class TextureUvs {
x0: number;
y0: number;
x1: number;
y1: number;
x2: number;
y2: number;
x3: number;
y3: number;
uvsUint32: Uint32Array;
protected set(frame: Rectangle, baseFrame: Rectangle, rotate: number): void;
}
class Spritesheet {
static BATCH_SIZE: number;
constructor(baseTexture: BaseTexture, data: any, resolutionFilename?: string);
baseTexture: BaseTexture;
textures: { [key: string]: Texture; };
data: any;
resolution: number;
protected _frames: any;
protected _frameKeys: string;
protected _batchIndex: number;
protected _callback: (spriteSheet: this, textures: { [key: string]: Texture; }) => void;
protected _updateResolution(resolutionFilename: string): number;
parse(callback: (spriteSheet: this, textures: { [key: string]: Texture; }) => void): void;
protected _processFrames(initialFrameIndex: number): void;
protected _parseComplete(): void;
protected _nextBatch(): void;
destroy(destroyBase?: boolean): void;
}
class VideoBaseTexture extends BaseTexture {
constructor(source: HTMLVideoElement, scaleMode?: number);
autoUpdate: boolean;
autoPlay: boolean;
protected _isAutoUpdating: boolean;
update(): void;
protected _onCanPlay(): void;
protected _onPlayStart(): void;
protected _onPlayStop(): void;
destroy(): void;
protected _isSourcePlaying(): boolean;
protected _isSourceReady(): boolean;
static fromVideo(video: HTMLVideoElement, scaleMode?: number): VideoBaseTexture;
static fromUrl(videoSrc: string | any | string[] | any[]): VideoBaseTexture;
static fromUrls(videoSrc: string | any | string[] | any[]): VideoBaseTexture;
source: HTMLVideoElement;
protected loadSource(source: HTMLVideoElement): void;
}
// ticker
namespace ticker {
const shared: Ticker;
class TickerListener {
constructor(fn: (deltaTime: number) => void, context?: any, priority?: number, once?: boolean);
fn: (deltaTime: number) => void;
context: any;
priority: number;
once: boolean;
next: TickerListener;
previous: TickerListener;
protected _destroyed: boolean;
match(fn: (deltaTime: number) => void, context?: any): boolean;
emit(deltaTime: number): TickerListener;
connect(previous: TickerListener): void;
destroy(hard?: boolean): void;
}
class Ticker {
protected _tick: (time: number) => void;
protected _head: TickerListener;
protected _requestId: number | null;
protected _maxElapsedMS: number;
autoStart: boolean;
deltaTime: number;
elapsedMS: number;
lastTime: number;
speed: number;
started: boolean;
protected _requestIfNeeded(): void;
protected _cancelIfNeeded(): void;
protected _startIfPossible(): void;
add(fn: (deltaTime: number) => void, context?: any, priority?: number): Ticker;
addOnce(fn: (deltaTime: number) => void, context?: any, priority?: number): Ticker;
//tslint:disable-next-line:ban-types forbidden-types
remove(fn: Function, context?: any, priority?: number): Ticker;
protected _addListener(listener: TickerListener): Ticker;
readonly FPS: number;
minFPS: number;
start(): void;
stop(): void;
destroy(): void;
update(currentTime?: number): void;
}
}
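// Usage sketch (illustrative only): driving per-frame updates from the shared
// ticker; `bunny` is an assumed display object.
//
//   PIXI.ticker.shared.add((deltaTime) => {
//       bunny.rotation += 0.05 * deltaTime;
//   });
//   PIXI.ticker.shared.addOnce(() => console.log('first frame rendered'));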
// shader
class Shader extends glCore.GLShader { }
//////////////////////////////////////////////////////////////////////////////
////////////////////////////EXTRACT///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
namespace extract {
class CanvasExtract {
protected renderer: CanvasRenderer;
constructor(renderer: CanvasRenderer);
image(target?: DisplayObject | RenderTexture): HTMLImageElement;
base64(target?: DisplayObject | RenderTexture): string;
canvas(target?: DisplayObject | RenderTexture): HTMLCanvasElement;
pixels(renderTexture?: DisplayObject | RenderTexture): number[];
destroy(): void;
}
class WebGLExtract {
protected renderer: WebGLRenderer;
constructor(renderer: WebGLRenderer);
image(target?: DisplayObject | RenderTexture): HTMLImageElement;
base64(target?: DisplayObject | RenderTexture): string;
canvas(target?: DisplayObject | RenderTexture): HTMLCanvasElement;
pixels(renderTexture?: DisplayObject | RenderTexture): number[];
destroy(): void;
}
}
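// Usage sketch (illustrative only): an extract instance normally hangs off a
// renderer's plugin map (assumed here; it is declared elsewhere in this file).
//
//   const dataUrl = renderer.plugins.extract.base64(stage);
//   const canvas = renderer.plugins.extract.canvas(sprite);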
//////////////////////////////////////////////////////////////////////////////
////////////////////////////EXTRAS////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
namespace extras {
interface BitmapTextStyle {
font?: string | {
name?: string;
size?: number;
};
align?: string;
tint?: number;
}
class BitmapText extends Container {
static registerFont(xml: XMLDocument, texture: Texture): any;
constructor(text: string, style?: BitmapTextStyle);
protected _textWidth: number;
protected _textHeight: number;
textWidth: number;
textHeight: number;
protected _glyphs: Sprite[];
protected _font: string | {
name?: string;
size?: number;
};
font: string | {
name?: string;
size?: number;
};
protected _text: string;
protected _maxWidth: number;
maxWidth: number;
protected _maxLineHeight: number;
maxLineHeight: number;
protected _anchor: ObservablePoint;
dirty: boolean;
tint: number;
align: string;
text: string;
anchor: PIXI.Point | number;
protected updateText(): void;
updateTransform(): void;
getLocalBounds(): Rectangle;
protected validate(): void;
static fonts: any;
}
interface AnimatedSpriteTextureTimeObject {
texture: Texture;
time?: number;
}
class AnimatedSprite extends Sprite {
constructor(textures: Texture[] | AnimatedSpriteTextureTimeObject[], autoUpdate?: boolean);
protected _autoUpdate: boolean;
protected _textures: Texture[];
protected _durations: number[];
textures: Texture[] | AnimatedSpriteTextureTimeObject[];
animationSpeed: number;
loop: boolean;
onComplete: () => void;
onFrameChange: (currentFrame: number) => void;
onLoop: () => void;
protected _currentTime: number;
playing: boolean;
totalFrames: number;
currentFrame: number;
stop(): void;
play(): void;
gotoAndStop(frameNumber: number): void;
gotoAndPlay(frameNumber: number): void;
protected update(deltaTime: number): void;
destroy(options?: DestroyOptions | boolean): void;
static fromFrames(frame: string[]): AnimatedSprite;
static fromImages(images: string[]): AnimatedSprite;
}
class TextureTransform {
constructor(texture: Texture, clampMargin?: number);
protected _texture: Texture;
protected mapCoord: Matrix;
protected uClampFrame: Float32Array;
protected uClampOffset: Float32Array;
protected _lastTextureID: number;
clampOffset: number;
clampMargin: number;
texture: Texture;
update(forceUpdate?: boolean): boolean;
}
class TilingSprite extends Sprite {
constructor(texture: Texture, width?: number, height?: number);
tileTransform: TransformStatic;
protected _width: number;
protected _height: number;
protected _canvasPattern: CanvasPattern;
uvTransform: TextureTransform;
uvRespectAnchor: boolean;
clampMargin: number;
|
protected _onTextureUpdate(): void;
protected _renderWebGL(renderer: WebGLRenderer): void;
protected _renderCanvas(renderer: CanvasRenderer): void;
protected _calculateBounds(): void;
getLocalBounds(rect?: Rectangle): Rectangle;
containsPoint(point: Point): boolean;
destroy(options?: DestroyOptions | boolean): void;
static from(source: number | string | BaseTexture | HTMLCanvasElement | HTMLVideoElement, width?: number, height?: number): TilingSprite;
static fromFrame(frameId: string, width?: number, height?: number): TilingSprite;
// if you remove the next line, the class will break. https://github.com/pixijs/pixi-typescript/issues/96
static fromImage(imageId: string, crossorigin?: boolean, scaleMode?: number): Sprite;
static fromImage(imageId: string, width?: number, height?: number, crossorigin?: boolean, scaleMode?: number): TilingSprite;
width: number;
height: number;
}
class TilingSpriteRenderer extends ObjectRenderer {
constructor(renderer: WebGLRenderer);
render(ts: TilingSprite): void;
}
}
//////////////////////////////////////////////////////////////////////////////
////////////////////////////FILTERS///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
namespace filters {
class FXAAFilter extends Filter { }
class BlurFilter extends Filter {
constructor(strength?: number, quality?: number, resolution?: number, kernelSize?: number);
blurXFilter: BlurXFilter;
blurYFilter: BlurYFilter;
resolution: number;
padding: number;
passes: number;
blur: number;
blurX: number;
blurY: number;
quality: number;
}
class BlurXFilter extends Filter {
constructor(strength?: number, quality?: number, resolution?: number, kernelSize?: number);
protected _quality: number;
quality: number;
passes: number;
resolution: number;
strength: number;
firstRun: boolean;
blur: number;
}
class BlurYFilter extends Filter {
constructor(strength?: number, quality?: number, resolution?: number, kernelSize?: number);
protected _quality: number;
quality: number;
passes: number;
resolution: number;
strength: number;
firstRun: boolean;
blur: number;
}
class ColorMatrixFilter extends Filter {
constructor();
protected _loadMatrix(matrix: number[], multiply?: boolean): void;
protected _multiply(out: number[], a: number[], b: number[]): void;
protected _colorMatrix(matrix: number[]): void;
matrix: number[];
alpha: number;
brightness(b: number, multiply?: boolean): void;
greyscale(scale: number, multiply?: boolean): void;
blackAndWhite(multiply?: boolean): void;
hue(rotation: number, multiply?: boolean): void;
contrast(amount: number, multiply?: boolean): void;
saturate(amount: number, multiply?: boolean): void;
desaturate(multiply?: boolean): void;
negative(multiply?: boolean): void;
sepia(multiply?: boolean): void;
technicolor(multiply?: boolean): void;
polaroid(multiply?: boolean): void;
toBGR(multiply?: boolean): void;
kodachrome(multiply?: boolean): void;
browni(multiply?: boolean): void;
vintage(multiply?: boolean): void;
colorTone(desaturation: number, toned: number, lightColor: string, darkColor: string, multiply?: boolean): void;
night(intensity: number, multiply?: boolean): void;
predator(amount: number, multiply?: boolean): void;
lsd(multiply?: boolean): void;
reset(): void;
}
class DisplacementFilter extends Filter {
constructor(sprite: Sprite, scale?: number);
scale: Point;
map: Texture;
}
class VoidFilter extends Filter {
glShaderKey: number;
}
// pixi-filters.d.ts todo
// https://github.com/pixijs/pixi-filters/
class NoiseFilter extends Filter {
constructor(noise?: number, seed?: number);
noise: number;
seed: number;
}
}
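// Usage sketch (illustrative only): filters are applied by assigning an array
// to a display object's `filters` property; the strength values are invented.
//
//   sprite.filters = [new PIXI.filters.BlurFilter(4)];
//   sprite.filters = [new PIXI.filters.NoiseFilter(0.5)];
//   sprite.filters = null; // remove all filters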
//////////////////////////////////////////////////////////////////////////////
////////////////////////////INTERACTION///////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
namespace interaction {
interface InteractiveTarget {
interactive: boolean;
interactiveChildren: boolean;
hitArea: PIXI.Rectangle | PIXI.Circle | PIXI.Ellipse | PIXI.Polygon | PIXI.RoundedRectangle;
buttonMode: boolean;
cursor: string;
trackedPointers(): { [key: number]: InteractionTrackingData; };
// deprecated
defaultCursor: string;
}
interface InteractionTrackingData {
readonly pointerId: number;
flags: number;
none: number;
over: boolean;
rightDown: boolean;
leftDown: boolean;
}
interface InteractionEvent {
stopped: boolean;
target: DisplayObject;
currentTarget: DisplayObject;
type: string;
data: InteractionData;
stopPropagation(): void;
}
class InteractionData {
global: Point;
target: DisplayObject;
originalEvent: MouseEvent | TouchEvent | PointerEvent;
identifier: number;
isPrimary: boolean;
button: number;
buttons: number;
width: number;
height: number;
tiltX: number;
tiltY: number;
pointerType: string;
pressure: number;
rotationAngle: number;
twist: number;
tangentialPressure: number;
readonly pointerID: number;
protected _copyEvent(event: Touch | MouseEvent | PointerEvent): void;
protected _reset(): void;
getLocalPosition(displayObject: DisplayObject, point?: Point, globalPos?: Point): Point;
}
type InteractionPointerEvents = "pointerdown" | "pointercancel" | "pointerup" |
"pointertap" | "pointerupoutside" | "pointermove" | "pointerover" | "pointerout";
type InteractionTouchEvents = "touchstart" | "touchcancel" | "touchend" |
"touchendoutside" | "touchmove" | "tap";
type InteractionMouseEvents = "rightdown" | "mousedown" | "rightup" | "mouseup" |
"rightclick" | "click" | "rightupoutside" | "mouseupoutside" | "mousemove" |
"mouseover" | "mouseout";
type InteractionEventTypes = InteractionPointerEvents | InteractionTouchEvents | InteractionMouseEvents;
interface InteractionManagerOptions {
autoPreventDefault?: boolean;
interactionFrequency?: number;
}
class InteractionManager extends utils.EventEmitter {
constructor(renderer: CanvasRenderer | WebGLRenderer | SystemRenderer, options?: InteractionManagerOptions);
renderer: SystemRenderer;
autoPreventDefault: boolean;
interactionFrequency: number;
mouse: InteractionData;
activeInteractionData: { [key: number]: InteractionData; };
interactionDataPool: InteractionData[];
eventData: InteractionEvent;
protected interactionDOMElement: HTMLElement;
moveWhenInside: boolean;
eventsAdded: boolean;
protected mouseOverRenderer: boolean;
readonly supportsTouchEvents: boolean;
readonly supportsPointerEvents: boolean;
protected onPointerUp: (event: PointerEvent) => void;
protected processPointerUp: (interactionEvent: InteractionEvent, displayObject: Container | PIXI.Sprite | PIXI.extras.TilingSprite, hit: boolean) => void;
protected onPointerCancel: (event: PointerEvent) => void;
protected processPointerCancel: (interactionEvent: InteractionEvent, displayObject: PIXI.Container | PIXI.Sprite | PIXI.extras.TilingSprite) => void;
protected onPointerDown: (event: PointerEvent) => void;
protected processPointerDown: (interactionEvent: InteractionEvent, displayObject: PIXI.Container | PIXI.Sprite | PIXI.extras.TilingSprite, hit: boolean) => void;
protected onPointerMove: (event: PointerEvent) => void;
protected processPointerMove: (interactionEvent: InteractionEvent, displayObject: PIXI.Container | PIXI.Sprite | PIXI.extras.TilingSprite, hit: boolean) => void;
protected onPointerOut: (event: PointerEvent) => void;
protected processPointerOverOut: (interactionEvent: InteractionEvent, displayObject: PIXI.Container | PIXI.Sprite | PIXI.extras.TilingSprite, hit: boolean) => void;
protected onPointerOver: (event: PointerEvent) => void;
cursorStyles: {
default: string;
pointer: string;
};
currentCursorMode: string;
cursor: string;
protected _tempPoint: Point;
resolution: number;
hitTest(globalPoint: Point, root?: Container): DisplayObject;
protected setTargetElement(element: HTMLCanvasElement, resolution?: number): void;
protected addEvents(): void;
protected removeEvents(): void;
update(deltaTime?: number): void;
setCursorMode(mode: string): void;
protected dispatchEvent(displayObject: Container | Sprite | extras.TilingSprite, eventString: string, eventData: any): void;
mapPositionToPoint(point: Point, x: number, y: number): void;
//tslint:disable-next-line:ban-types forbidden-types
protected processInteractive(
interactionEvent: InteractionEvent,
displayObject: PIXI.Container | PIXI.Sprite | PIXI.extras.TilingSprite,
func?: Function,
hitTest?: boolean,
interactive?: boolean
): boolean;
//tslint:disable-next-line:ban-types forbidden-types
protected onPointerComplete(
originalEvent: PointerEvent,
cancelled: boolean,
func: Function
): void;
protected getInteractionDataForPointerId(pointerId: number): InteractionData;
protected releaseInteractionDataForPointerId(event: PointerEvent): void;
protected configureInteractionEventForDOMEvent(interactionEvent: InteractionEvent, pointerEvent: PointerEvent, interactionData: InteractionData): InteractionEvent;
protected normalizeToPointerData(event: TouchEvent | MouseEvent | PointerEvent): PointerEvent[];
destroy(): void;
// deprecated
defaultCursorStyle: string;
currentCursorStyle: string;
}
}
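// Usage sketch (illustrative only): opting a display object into interaction
// and listening for one of the InteractionEventTypes above.
//
//   sprite.interactive = true;
//   sprite.buttonMode = true; // show a pointer cursor on hover
//   sprite.on('pointerdown', (event) => {
//       console.log(event.data.global); // InteractionData position
//   });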
//////////////////////////////////////////////////////////////////////////////
///////////////////////////////LOADER/////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// pixi loader extends
// https://github.com/englercj/resource-loader/
// 2.0.6
class MiniSignalBinding {
//tslint:disable-next-line:ban-types forbidden-types
constructor(fn: Function, once?: boolean, thisArg?: any);
//tslint:disable-next-line:ban-types forbidden-types
protected _fn: Function;
protected _once: boolean;
protected _thisArg: any;
protected _next: MiniSignalBinding;
protected _prev: MiniSignalBinding;
protected _owner: MiniSignal;
detach(): boolean;
}
class MiniSignal {
constructor();
protected _head: MiniSignalBinding;
protected _tail: MiniSignalBinding;
handlers(exists?: boolean): MiniSignalBinding[] | boolean;
handlers(exists?: true): boolean;
handlers(exists?: false): MiniSignalBinding[];
has(node: MiniSignalBinding): boolean;
dispatch(): boolean;
//tslint:disable-next-line:ban-types forbidden-types
add(fn: Function, thisArg?: any): any;
//tslint:disable-next-line:ban-types forbidden-types
once(fn: Function, thisArg?: any): any;
detach(node: MiniSignalBinding): MiniSignal;
detachAll(): MiniSignal;
}
namespace loaders {
interface LoaderOptions {
crossOrigin?: boolean | string;
loadType?: number;
xhrType?: string;
metaData?: any;
loadElement?: HTMLImageElement | HTMLAudioElement | HTMLVideoElement;
skipSource?: boolean;
}
interface ResourceDictionary {
[index: string]: PIXI.loaders.Resource;
}
// As of ResourceLoader v2 we no longer require EventEmitter
// However, for deprecation reasons, it remains.
class Loader extends utils.EventEmitter {
// pixi overrides here
//tslint:disable-next-line:ban-types forbidden-types
static addPixiMiddleware(fn: Function): void;
// below this line is the original non-pixi loader
static Resource: any;
static async: any;
static base64: any;
constructor(baseUrl?: string, concurrency?: number);
baseUrl: string;
progress: number;
loading: boolean;
defaultQueryString: string;
//tslint:disable-next-line:ban-types forbidden-types
protected _beforeMiddleware: Function[];
//tslint:disable-next-line:ban-types forbidden-types
protected _afterMiddleware: Function[];
protected _resourcesParsing: Resource[];
//tslint:disable-next-line:ban-types forbidden-types
protected _boundLoadResource: (r: Resource, d: Function) => void;
protected _queue: any;
resources: ResourceDictionary;
onProgress: MiniSignal;
onError: MiniSignal;
onLoad: MiniSignal;
onStart: MiniSignal;
onComplete: MiniSignal;
add(...params: any[]): this;
//tslint:disable-next-line:ban-types forbidden-types
add(name: string, url: string, options?: LoaderOptions, cb?: Function): this;
//tslint:disable-next-line:ban-types forbidden-types
add(obj: string | any | any[], options?: LoaderOptions, cb?: Function): this;
//tslint:disable-next-line:ban-types forbidden-types
pre(fn: Function): this;
//tslint:disable-next-line:ban-types forbidden-types
use(fn: Function): this;
reset(): this;
//tslint:disable-next-line:ban-types forbidden-types
load(cb?: Function): this;
protected _prepareUrl(url: string): string;
//tslint:disable-next-line:ban-types forbidden-types
protected _loadResource(resource: Resource, dequeue: Function): void;
protected _onComplete(): void;
protected _onLoad(resource: Resource): void;
destroy(): void;
// deprecation
on(event: "complete", fn: (loader: loaders.Loader, object: any) => void, context?: any): this;
on(event: "error", fn: (error: Error, loader: loaders.Loader, resource: Resource) => void, context?: any): this;
on(event: "load" | "progress", fn: (loader: loaders.Loader, resource: Resource) => void, context?: any): this;
on(event: "start", fn: (loader: loaders.Loader) => void, context?: any): this;
once(event: "complete", fn: (loader: loaders.Loader, object: any) => void, context?: any): this;
once(event: "error", fn: (error: Error, loader: loaders.Loader, resource: Resource) => void, context?: any): this;
once(event: "load" | "progress", fn: (loader: loaders.Loader, resource: Resource) => void, context?: any): this;
once(event: "start", fn: (loader: loaders.Loader) => void, context?: any): this;
//tslint:disable-next-line:ban-types forbidden-types
off(event: "complete" | "error" | "load" | "progress" | "start" | string, fn?: Function, context?: any): this;
}
interface TextureDictionary {
[index: string]: PIXI.Texture;
}
class Resource {
static setExtensionLoadType(extname: string, loadType: number): void;
static setExtensionXhrType(extname: string, xhrType: string): void;
constructor(name: string, url: string | string[], options?: LoaderOptions);
protected _flags: number;
name: string;
url: string;
extension: string;
data: any;
crossOrigin: boolean | string;
loadType: number;
xhrType: string;
metadata: any;
error: Error;
xhr: XMLHttpRequest | null;
children: Resource[];
type: number;
progressChunk: number;
//tslint:disable-next-line:ban-types forbidden-types
protected _dequeue: Function;
//tslint:disable-next-line:ban-types forbidden-types
protected _onLoadBinding: Function;
//tslint:disable-next-line:ban-types forbidden-types
protected _boundComplete: Function;
//tslint:disable-next-line:ban-types forbidden-types
protected _boundOnError: Function;
//tslint:disable-next-line:ban-types forbidden-types
protected _boundOnProgress: Function;
//tslint:disable-next-line:ban-types forbidden-types
protected _boundXhrOnError: Function;
//tslint:disable-next-line:ban-types forbidden-types
protected _boundXhrOnAbort: Function;
//tslint:disable-next-line:ban-types forbidden-types
protected _boundXhrOnLoad: Function;
//tslint:disable-next-line:ban-types forbidden-types
protected _boundXdrOnTimeout: Function;
onStart: MiniSignal;
onProgress: MiniSignal;
onComplete: MiniSignal;
onAfterMiddleware: MiniSignal;
isDataUrl: boolean;
isComplete: boolean;
isLoading: boolean;
complete(): void;
abort(message?: string): void;
//tslint:disable-next-line:ban-types forbidden-types
load(cb?: Function): void;
protected _hasFlag(flag: number): boolean;
protected _setFlag(flag: number, value: boolean): void;
protected _loadElement(type: string): void;
protected _loadSourceElement(type: string): void;
protected _loadXhr(): void;
protected _loadXdr(): void;
protected _createSource(type: string, url: string, mime?: string): HTMLSourceElement;
protected _onError(event?: any): void;
protected _onProgress(event?: any): void;
protected _xhrOnError(): void;
protected _xhrOnAbort(): void;
protected _xdrOnTimeout(): void;
protected _xhrOnLoad(): void;
protected _determineCrossOrigin(url: string, loc: any): string;
protected _determineXhrType(): number;
protected _determineLoadType(): number;
protected _getExtension(): string;
protected _getMimeXhrType(type: number): string;
static STATUS_FLAGS: {
NONE: number;
DATA_URL: number;
COMPLETE: number;
LOADING: number;
};
static TYPE: {
UNKNOWN: number;
JSON: number;
XML: number;
IMAGE: number;
AUDIO: number;
VIDEO: number;
TEXT: number;
};
static LOAD_TYPE: {
XHR: number;
IMAGE: number;
AUDIO: number;
VIDEO: number;
};
static XHR_RESPONSE_TYPE: {
DEFAULT: string;
BUFFER: string;
BLOB: string;
DOCUMENT: string;
JSON: string;
TEXT: string;
};
static EMPTY_GIF: string;
texture: Texture;
spineAtlas: any;
spineData: any;
textures?: TextureDictionary;
}
}
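// Usage sketch (illustrative only): queueing and loading a resource; the asset
// name and URL are invented.
//
//   const loader = new PIXI.loaders.Loader();
//   loader.add('bunny', 'images/bunny.png')
//       .load((l, resources) => {
//           const bunny = new PIXI.Sprite(resources.bunny.texture);
//       });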
//////////////////////////////////////////////////////////////////////////////
///////////////////////////////MESH///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
namespace mesh {
class Mesh extends Container {
constructor(texture: Texture, vertices?: Float32Array, uvs?: Float32Array, indices?: Uint16Array, drawMode?: number);
protected _texture: Texture;
uvs: Float32Array;
vertices: Float32Array;
indices: Uint16Array;
dirty: number;
indexDirty: number;
dirtyVertex: boolean;
protected _geometryVersion: number;
blendMode: number;
pluginName: string;
canvasPadding: number;
drawMode: number;
texture: Texture;
tintRgb: Float32Array;
protected _glDatas: { [n: number]: any; };
protected _uvTransform: extras.TextureTransform;
uploadUvTransform: boolean;
multiplyUvs(): void;
refresh(forceUpdate?: boolean): void;
protected _refresh(): void;
protected _renderWebGL(renderer: WebGLRenderer): void;
protected _renderCanvas(renderer: CanvasRenderer): void;
protected _onTextureUpdate(): void;
protected _calculateBounds(): void;
containsPoint(point: Point): boolean;
tint: number;
static DRAW_MODES: {
TRIANGLE_MESH: number;
TRIANGLES: number;
};
}
class CanvasMeshRenderer {
constructor(renderer: CanvasRenderer);
renderer: CanvasRenderer;
render(mesh: Mesh): void;
protected _renderTriangleMesh(mesh: Mesh): void;
protected _renderTriangles(mesh: Mesh): void;
protected _renderDrawTriangle(mesh: Mesh, index0: number, index1: number, index2: number): void;
protected renderMeshFlat(mesh: Mesh): void;
destroy(): void;
}
class MeshRenderer extends ObjectRenderer {
constructor(renderer: WebGLRenderer);
shader: Shader;
render(mesh: Mesh): void;
}
class Plane extends Mesh {
constructor(texture: Texture, verticesX?: number, verticesY?: number);
protected _ready: boolean;
verticesX: number;
verticesY: number;
drawMode: number;
refresh(): void;
protected _onTextureUpdate(): void;
}
class NineSlicePlane extends Plane {
constructor(texture: Texture, leftWidth?: number, topHeight?: number, rightWidth?: number, bottomHeight?: number);
width: number;
height: number;
leftWidth: number;
rightWidth: number;
topHeight: number;
bottomHeight: number;
protected _leftWidth: number;
protected _rightWidth: number;
protected _topHeight: number;
protected _bottomHeight: number;
protected _height: number;
protected _width: number;
protected _origHeight: number;
protected _origWidth: number;
protected _uvh: number;
protected _uvw: number;
updateHorizontalVertices(): void;
updateVerticalVertices(): void;
protected drawSegment(context: CanvasRenderingContext2D | WebGLRenderingContext, textureSource: any, w: number, h: number, x1: number, y1: number, x2: number, y2: number): void;
protected _refresh(): void;
}
class Rope extends Mesh {
constructor(texture: Texture, points: Point[]);
points: Point[];
colors: number[];
autoUpdate: boolean;
protected _refresh(): void;
refreshVertices(): void;
}
}
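// Usage sketch (illustrative only): a NineSlicePlane keeps its four corner
// regions fixed while the edges and center stretch; the margin values and
// `panelTexture` are invented.
//
//   const panel = new PIXI.mesh.NineSlicePlane(panelTexture, 30, 30, 30, 30);
//   panel.width = 300;  // corners stay 30px; edges stretch to fit
//   panel.height = 120;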
//////////////////////////////////////////////////////////////////////////////
/////////////////////////////PARTICLES////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
namespace particles {
interface ParticleContainerProperties {
scale?: boolean;
position?: boolean;
rotation?: boolean;
uvs?: boolean;
alpha?: boolean;
}
class ParticleContainer extends Container {
constructor(size?: number, properties?: ParticleContainerProperties, batchSize?: number);
protected _tint: number;
protected tintRgb: number | any[];
tint: number;
protected _properties: boolean[];
protected _maxSize: number;
protected _batchSize: number;
protected _glBuffers: { [n: number]: WebGLBuffer; };
protected _bufferToUpdate: number;
interactiveChildren: boolean;
blendMode: number;
roundPixels: boolean;
baseTexture: BaseTexture;
setProperties(properties: ParticleContainerProperties): void;
protected onChildrenChange: (smallestChildIndex?: number) => void;
destroy(options?: DestroyOptions | boolean): void;
}
class ParticleBuffer {
constructor(gl: WebGLRenderingContext, properties: any, dynamicPropertyFlags: any[], size: number);
gl: WebGLRenderingContext;
vertSize: number;
vertByteSize: number;
size: number;
dynamicProperties: any[];
staticProperties: any[];
staticStride: number;
staticBuffer: any;
staticData: any;
dynamicStride: number;
dynamicBuffer: any;
dynamicData: any;
destroy(): void;
}
interface ParticleRendererProperty {
attribute: number;
size: number;
uploadFunction(children: PIXI.DisplayObject[], startIndex: number, amount: number, array: number[], stride: number, offset: number): void;
offset: number;
}
class ParticleRenderer extends ObjectRenderer {
constructor(renderer: WebGLRenderer);
shader: glCore.GLShader;
indexBuffer: WebGLBuffer;
properties: ParticleRendererProperty[];
protected tempMatrix: Matrix;
start(): void;
generateBuffers(container: ParticleContainer): ParticleBuffer[];
uploadVertices(children: DisplayObject[], startIndex: number, amount: number, array: number[], stride: number, offset: number): void;
uploadPosition(children: DisplayObject[], startIndex: number, amount: number, array: number[], stride: number, offset: number): void;
uploadRotation(children: DisplayObject[], startIndex: number, amount: number, array: number[], stride: number, offset: number): void;
uploadUvs(children: DisplayObject[], startIndex: number, amount: number, array: number[], stride: number, offset: number): void;
uploadAlpha(children: DisplayObject[], startIndex: number, amount: number, array: number[], stride: number, offset: number): void;
destroy(): void;
indices: Uint16Array;
}
}
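// Usage sketch (illustrative only): a ParticleContainer trades features for
// speed; only the properties flagged here are uploaded each frame.
//
//   const swarm = new PIXI.particles.ParticleContainer(10000, {
//       position: true,
//       rotation: true,
//   });
//   swarm.addChild(new PIXI.Sprite(particleTexture)); // assumed texture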
//////////////////////////////////////////////////////////////////////////////
////////////////////////////PREPARE///////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
namespace prepare {
type AddHook = (item: any, queue: any[]) => boolean;
type UploadHook<UploadHookSource> = (prepare: UploadHookSource, item: any) => boolean;
abstract class BasePrepare<UploadHookSource>{
constructor(renderer: SystemRenderer);
limiter: CountLimiter | TimeLimiter;
protected renderer: SystemRenderer;
protected uploadHookHelper: UploadHookSource;
protected queue: any[];
protected addHooks: AddHook[];
protected uploadHooks: Array<UploadHook<UploadHookSource>>;
//tslint:disable-next-line:ban-types forbidden-types
protected completes: Function[];
protected ticking: boolean;
protected delayedTick: () => void;
//tslint:disable-next-line:ban-types forbidden-types
upload(item: Function | DisplayObject | Container | BaseTexture | Texture | Graphics | Text | any, done?: () => void): void;
protected tick(): void;
protected prepareItems(): void;
registerFindHook(addHook: AddHook): this;
registerUploadHook(uploadHook: UploadHook<UploadHookSource>): this;
protected findMultipleBaseTextures(item: PIXI.DisplayObject, queue: any[]): boolean;
protected findBaseTexture(item: PIXI.DisplayObject, queue: any[]): boolean;
protected findTexture(item: PIXI.DisplayObject, queue: any[]): boolean;
add(item: PIXI.DisplayObject | PIXI.Container | PIXI.BaseTexture | PIXI.Texture | PIXI.Graphics | PIXI.Text | any): this;
destroy(): void;
}
class CanvasPrepare extends BasePrepare<CanvasPrepare> {
constructor(renderer: CanvasRenderer);
protected canvas: HTMLCanvasElement;
protected ctx: CanvasRenderingContext2D;
}
class WebGLPrepare extends BasePrepare<WebGLRenderer> {
constructor(renderer: WebGLRenderer);
}
class CountLimiter {
constructor(maxItemsPerFrame: number);
protected maxItemsPerFrame: number;
protected itemsLeft: number;
beginFrame(): void;
allowedToUpload(): boolean;
}
class TimeLimiter {
constructor(maxMilliseconds: number);
protected maxMilliseconds: number;
protected frameStart: number;
beginFrame(): void;
allowedToUpload(): boolean;
}
}
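// Usage sketch (illustrative only): pre-uploading a scene graph to the GPU
// before first render, via the renderer's prepare plugin (assumed here).
//
//   renderer.plugins.prepare.upload(stage, () => {
//       // textures are uploaded; rendering will not hitch on first use
//   });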
//////////////////////////////////////////////////////////////////////////////
/////////////////////////////pixi-gl-core/////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// pixi-gl-core https://github.com/pixijs/pixi-gl-core
// SharedArrayBuffer is not yet available as a type.
// Need to fully define what an `Attrib` is.
namespace glCore {
interface ContextOptions {
/**
* Boolean that indicates if the canvas contains an alpha buffer.
*/
alpha?: boolean;
/**
* Boolean that indicates that the drawing buffer has a depth buffer of at least 16 bits.
*/
depth?: boolean;
/**
* Boolean that indicates that the drawing buffer has a stencil buffer of at least 8 bits.
*/
stencil?: boolean;
/**
* Boolean that indicates whether or not to perform anti-aliasing.
*/
antialias?: boolean;
/**
* Boolean that indicates that the page compositor will assume the drawing buffer contains colors with pre-multiplied alpha.
*/
premultipliedAlpha?: boolean;
/**
* If the value is true the buffers will not be cleared and will preserve their values until cleared or overwritten by the author.
*/
preserveDrawingBuffer?: boolean;
/**
* Boolean that indicates if a context will be created if the system performance is low.
*/
failIfMajorPerformanceCaveat?: boolean;
}
function createContext(view: HTMLCanvasElement, options?: ContextOptions): WebGLRenderingContext;
function setVertexAttribArrays(gl: WebGLRenderingContext, attribs: Attrib[], state?: WebGLState): WebGLRenderingContext | undefined;
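// Usage sketch (illustrative only): creating a context with ContextOptions;
// `view` is an assumed HTMLCanvasElement.
//
//   const gl = PIXI.glCore.createContext(view, {
//       alpha: false,
//       antialias: true,
//       preserveDrawingBuffer: false,
//   });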
class GLBuffer {
constructor(gl: WebGLRenderingContext, type: number, data: ArrayBuffer | ArrayBufferView | any, drawType: number);
protected _updateID?: number;
gl: WebGLRenderingContext;
buffer: WebGLBuffer;
type: number;
drawType: number;
data: ArrayBuffer | ArrayBufferView | any;
upload(data: ArrayBuffer | ArrayBufferView | any, offset?: number, dontBind?: boolean): void;
bind(): void;
static createVertexBuffer(gl: WebGLRenderingContext, data: ArrayBuffer | ArrayBufferView | any, drawType: number): GLBuffer;
static createIndexBuffer(gl: WebGLRenderingContext, data: ArrayBuffer | ArrayBufferView | any, drawType: number): GLBuffer;
static create(gl: WebGLRenderingContext, type: number, data: ArrayBuffer | ArrayBufferView | any, drawType: number): GLBuffer;
destroy(): void;
}
class GLFramebuffer {
constructor(gl: WebGLRenderingContext, width: number, height: number);
gl: WebGLRenderingContext;
frameBuffer: WebGLFramebuffer;
stencil: WebGLRenderbuffer;
texture: GLTexture;
width: number;
height: number;
enableTexture(texture: GLTexture): void;
enableStencil(): void;
clear(r: number, g: number, b: number, a: number): void;
bind(): void;
unbind(): void;
resize(width: number, height: number): void;
destroy(): void;
static createRGBA(gl: WebGLRenderingContext, width: number, height: number, data: ArrayBuffer | ArrayBufferView | any): GLFramebuffer;
static createFloat32(gl: WebGLRenderingContext, width: number, height: number, data: ArrayBuffer | ArrayBufferView | any): GLFramebuffer;
}
class GLShader {
constructor(gl: WebGLRenderingContext, vertexSrc: string | string[], fragmentSrc: string | string[], precision?: string, attributeLocations?: { [key: string]: number });
gl: WebGLRenderingContext;
program?: WebGLProgram | null;
uniformData: any;
uniforms: any;
attributes: any;
bind(): void;
destroy(): void;
}
class GLTexture {
constructor(gl: WebGLRenderingContext, width?: number, height?: number, format?: number, type?: number);
gl: WebGLRenderingContext;
texture: WebGLTexture;
mipmap: boolean;
premultiplyAlpha: boolean;
width: number;
height: number;
format: number;
type: number;
upload(source: HTMLImageElement | ImageData | HTMLVideoElement | HTMLCanvasElement): void;
uploadData(data: ArrayBuffer | ArrayBufferView, width: number, height: number): void;
bind(location?: number): void;
unbind(): void;
minFilter(linear: boolean): void;
magFilter(linear: boolean): void;
enableMipmap(): void;
enableLinearScaling(): void;
enableNearestScaling(): void;
enableWrapClamp(): void;
enableWrapRepeat(): void;
enableWrapMirrorRepeat(): void;
destroy(): void;
static fromSource(gl: WebGLRenderingContext, source: HTMLImageElement | ImageData | HTMLVideoElement | HTMLCanvasElement, premultiplyAlpha?: boolean): GLTexture;
static fromData(gl: WebGLRenderingContext, data: number[], width: number, height: number): GLTexture;
}
interface Attrib {
attribute: {
location: number;
size: number;
};
normalized: boolean;
stride: number;
start: number;
buffer: ArrayBuffer;
}
interface WebGLRenderingContextAttribute {
buffer: WebGLBuffer;
attribute: any;
type: number;
normalized: boolean;
stride: number;
start: number;
}
interface AttribState {
tempAttribState: Attrib[];
attribState: Attrib[];
}
class VertexArrayObject {
static FORCE_NATIVE: boolean;
constructor(gl: WebGLRenderingContext, state: WebGLState);
protected nativeVaoExtension: any;
protected nativeState: AttribState;
protected nativeVao: VertexArrayObject;
gl: WebGLRenderingContext;
attributes: Attrib[];
indexBuffer: GLBuffer;
dirty: boolean;
bind(): VertexArrayObject;
unbind(): VertexArrayObject;
activate(): VertexArrayObject;
addAttribute(buffer: GLBuffer, attribute: Attrib, type: number, normalized: boolean, stride: number, start: number): VertexArrayObject;
addIndex(buffer: GLBuffer, options?: any): VertexArrayObject;
clear(): VertexArrayObject;
draw(type: number, size: number, start: number): VertexArrayObject;
destroy(): void;
}
}
//////////////////////////////////////////////////////////////////////////////
///////////////////////////////UTILS//////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
interface DecomposedDataUri {
mediaType: string;
subType: string;
encoding: string;
data: any;
}
namespace utils {
function uid(): number;
function hex2rgb(hex: number, out?: number[]): number[];
function hex2string(hex: number): string;
function rgb2hex(rgb: number[]): number;
function canUseNewCanvasBlendModes(): boolean;
function getResolutionOfUrl(url: string, defaultValue?: number): number;
function getSvgSize(svgString: string): any;
function decomposeDataUri(dataUri: string): DecomposedDataUri | void;
function getUrlFileExtension(url: string): string | void;
function sayHello(type: string): void;
function skipHello(): void;
function isWebGLSupported(): boolean;
function sign(n: number): number;
function removeItems<T>(arr: T[], startIdx: number, removeCount: number): void;
function correctBlendMode(blendMode: number, premultiplied: boolean): number;
function premultiplyTint(tint: number, alpha: number): number;
function premultiplyRgba(rgb: Float32Array | number[], alpha: number, out?: Float32Array, premultiply?: boolean): Float32Array;
function premultiplyTintToRgba(tint: number, alpha: number, out?: Float32Array, premultiply?: boolean): Float32Array;
const premultiplyBlendMode: number[][];
const TextureCache: any;
const BaseTextureCache: any;
// https://github.com/kaimallea/isMobile
namespace isMobile {
const apple: {
phone: boolean;
ipod: boolean;
tablet: boolean;
device: boolean;
};
const android: {
phone: boolean;
tablet: boolean;
device: boolean;
};
const amazon: {
phone: boolean;
tablet: boolean;
device: boolean;
};
const windows: {
phone: boolean;
tablet: boolean;
device: boolean;
};
const seven_inch: boolean;
const other: {
blackberry10: boolean;
blackberry: boolean;
opera: boolean;
firefox: boolean;
chrome: boolean;
device: boolean;
};
const any: boolean;
const phone: boolean;
const tablet: boolean;
}
// https://github.com/primus/eventemitter3
class EventEmitter {
static prefixed: string | boolean;
static EventEmitter: {
new (): EventEmitter;
prefixed: string | boolean;
};
/**
* Minimal EventEmitter interface that is molded against the Node.js
* EventEmitter interface.
*
* @constructor
* @api public
*/
constructor();
/**
* Return an array listing the events for which the emitter has registered listeners.
*
* @returns {(string | symbol)[]}
*/
eventNames(): Array<(string | symbol)>;
/**
* Return the listeners registered for a given event.
*
* @param {(string | symbol)} event The event name.
* @returns {Function[]}
*/
//tslint:disable-next-line:ban-types forbidden-types
listeners(event: string | symbol): Function[];
/**
* Check if there are listeners for a given event.
* If the `exists` argument is not `true`, the registered listeners are returned instead.
*
* @param {(string | symbol)} event The event name.
* @param {boolean} exists Only check if there are listeners.
* @returns {boolean}
*/
listeners(event: string | symbol, exists: boolean): boolean;
/**
* Calls each of the listeners registered for a given event.
*
* @param {(string | symbol)} event The event name.
* @param {...*} args Arguments that are passed to registered listeners
* @returns {boolean} `true` if the event had listeners, else `false`.
*/
emit(event: string | symbol, ...args: any[]): boolean;
/**
* Add a listener for a given event.
*
* @param {(string | symbol)} event The event name.
* @param {Function} fn The listener function.
* @param {*} [context=this] The context to invoke the listener with.
* @returns {EventEmitter} `this`.
*/
//tslint:disable-next-line:ban-types forbidden-types
on(event: string | symbol, fn: Function, context?: any): this;
/**
* Add a one-time listener for a given event.
*
* @param {(string | symbol)} event The event name.
* @param {Function} fn The listener function.
* @param {*} [context=this] The context to invoke the listener with.
* @returns {EventEmitter} `this`.
*/
//tslint:disable-next-line:ban-types forbidden-types
once(event: string | symbol, fn: Function, context?: any): this;
/**
* Remove the listeners of a given event.
*
* @param {(string | symbol)} event The event name.
* @param {Function} fn Only remove the listeners that match this function.
* @param {*} context Only remove the listeners that have this context.
* @param {boolean} once Only remove one-time listeners.
* @returns {EventEmitter} `this`.
*/
//tslint:disable-next-line:ban-types forbidden-types
removeListener(event: string | symbol, fn?: Function, context?: any, once?: boolean): this;
/**
* Remove all listeners, or those of the specified event.
*
* @param {(string | symbol)} event The event name.
* @returns {EventEmitter} `this`.
*/
removeAllListeners(event?: string | symbol): this;
/**
* Alias method for `removeListener`
*/
//tslint:disable-next-line:ban-types forbidden-types
off(event: string | symbol, fn?: Function, context?: any, once?: boolean): this;
/**
* Alias method for `on`
*/
//tslint:disable-next-line:ban-types forbidden-types
addListener(event: string | symbol, fn: Function, context?: any): this;
/**
* This function doesn't apply anymore.
* @deprecated
*/
setMaxListeners(): this;
}
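// Usage sketch (illustrative only): the on/once/emit contract described above;
// the event name and payload are invented.
//
//   const emitter = new PIXI.utils.EventEmitter();
//   emitter.once('ready', (payload) => console.log(payload.ok));
//   emitter.emit('ready', { ok: true });  // true: the event had a listener
//   emitter.emit('ready', { ok: true });  // false: the once-listener is gone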
}
//////////////////////////////////////////////////////////////////////////////
/////////////////////////////deprecation//////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// not sure how to handle blendmodes scalemodes basetexturecache
namespace core {
/**
* @class
* @private
* @name SpriteBatch
* @memberof PIXI
* @see PIXI.ParticleContainer
* @throws {ReferenceError} SpriteBatch does not exist any more, please use the new ParticleContainer instead.
* @deprecated since version 3.0.0
*/
type SpriteBatch = ParticleContainer;
/**
* @class
* @private
* @name AssetLoader
* @memberof PIXI
* @see PIXI.loaders.Loader
* @throws {ReferenceError} The loader system was overhauled in pixi v3, please see the new PIXI.loaders.Loader class.
* @deprecated since version 3.0.0
*/
type AssetLoader = loaders.Loader;
/**
* @class
* @private
* @name Stage
* @memberof PIXI
* @see PIXI.Container
* @deprecated since version 3.0.0
*/
type Stage = Container;
/**
* @class
* @private
* @name DisplayObjectContainer
* @memberof PIXI
* @see PIXI.Container
* @deprecated since version 3.0.0
*/
type DisplayObjectContainer = Container;
/**
* @class
* @private
* @name Strip
* @memberof PIXI
* @see PIXI.mesh.Mesh
* @deprecated since version 3.0.0
*/
type Strip = mesh.Mesh;
/**
* @class
* @private
* @name Rope
* @memberof PIXI
* @see PIXI.mesh.Rope
* @deprecated since version 3.0.0
*/
type Rope = mesh.Rope;
/**
* @class
* @private
* @name ParticleContainer
* @memberof PIXI
* @see PIXI.particles.ParticleContainer
* @deprecated since version 4.0.0
*/
type ParticleContainer = particles.ParticleContainer;
/**
* @class
* @private
* @name MovieClip
* @memberof PIXI
* @see PIXI.extras.MovieClip
* @deprecated since version 3.0.0
*/
type MovieClip = extras.AnimatedSprite;
/**
* @class
* @private
* @name TilingSprite
* @memberof PIXI
* @see PIXI.extras.TilingSprite
* @deprecated since version 3.0.0
*/
type TilingSprite = extras.TilingSprite;
/**
* @class
* @private
* @name BaseTextureCache
* @memberof PIXI
* @see PIXI.utils.BaseTextureCache
* @deprecated since version 3.0.0
*/
type BaseTextureCache = any;
/**
* @class
* @private
* @name BitmapText
* @memberof PIXI
* @see PIXI.extras.BitmapText
* @deprecated since version 3.0.0
*/
type BitmapText = extras.BitmapText;
/**
* @namespace
* @private
* @name math
* @memberof PIXI
* @see PIXI
* @deprecated since version 3.0.6
*/
type math = any;
/**
* @class
* @private
* @name PIXI.AbstractFilter
* @see PIXI.Filter
* @deprecated since version 3.0.6
*/
type AbstractFilter = Filter;
/**
* @class
* @private
* @name PIXI.TransformManual
* @see PIXI.TransformBase
* @deprecated since version 4.0.0
*/
type TransformManual = TransformBase;
/**
* @static
* @constant
* @name PIXI.TARGET_FPMS
* @see PIXI.settings.TARGET_FPMS
* @deprecated since version 4.2.0
*/
type TARGET_FPMS = number;
/**
* @static
* @constant
* @name PIXI.FILTER_RESOLUTION
* @see PIXI.settings.FILTER_RESOLUTION
* @deprecated since version 4.2.0
*/
type FILTER_RESOLUTION = number;
/**
* @static
* @constant
* @name PIXI.RESOLUTION
* @see PIXI.settings.RESOLUTION
* @deprecated since version 4.2.0
*/
type RESOLUTION = number;
/**
* @static
* @constant
* @name PIXI.MIPMAP_TEXTURES
* @see PIXI.settings.MIPMAP_TEXTURES
* @deprecated since version 4.2.0
*/
type MIPMAP_TEXTURES = any;
/**
* @static
* @constant
* @name PIXI.SPRITE_BATCH_SIZE
* @see PIXI.settings.SPRITE_BATCH_SIZE
* @deprecated since version 4.2.0
*/
type SPRITE_BATCH_SIZE = number;
/**
* @static
* @constant
* @name PIXI.SPRITE_MAX_TEXTURES
* @see PIXI.settings.SPRITE_MAX_TEXTURES
* @deprecated since version 4.2.0
*/
type SPRITE_MAX_TEXTURES = number;
/**
* @static
* @constant
* @name PIXI.RETINA_PREFIX
* @see PIXI.settings.RETINA_PREFIX
* @deprecated since version 4.2.0
*/
type RETINA_PREFIX = RegExp | string;
/**
* @static
* @constant
* @name PIXI.DEFAULT_RENDER_OPTIONS
* @see PIXI.settings.RENDER_OPTIONS
* @deprecated since version 4.2.0
*/
type DEFAULT_RENDER_OPTIONS = number;
/**
* @static
* @name PRECISION
* @memberof PIXI.settings
* @see PIXI.PRECISION
* @deprecated since version 4.4.0
*/
type PRECISION = string;
}
namespace extras {
/**
* @class
* @name MovieClip
* @memberof PIXI.extras
* @see PIXI.extras.AnimatedSprite
* @deprecated since version 4.2.0
*/
type MovieClip = extras.AnimatedSprite;
}
namespace settings {
/**
* @static
* @name PRECISION
* @memberof PIXI.settings
* @see PIXI.PRECISION
* @deprecated since version 4.4.0
*/
type PRECISION = number;
}
}
declare namespace pixi {
const gl: typeof PIXI.glCore;
}
//tslint:disable-next-line:no-single-declare-module
declare module "pixi.js" {
export = PIXI;
}
|
tileScale: Point | ObservablePoint;
tilePosition: Point | ObservablePoint;
multiplyUvs(uvs: Float32Array, out: Float32Array): Float32Array;
|
test_v1alpha1_data_volume_source_pvc.py
|
# coding: utf-8
"""
KubeVirt API
This is the KubeVirt API, an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1alpha1_data_volume_source_pvc import V1alpha1DataVolumeSourcePVC
class
|
(unittest.TestCase):
""" V1alpha1DataVolumeSourcePVC unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha1DataVolumeSourcePVC(self):
"""
Test V1alpha1DataVolumeSourcePVC
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1alpha1_data_volume_source_pvc.V1alpha1DataVolumeSourcePVC()
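# A hedged sketch of what the FIXME asks for; the `name`/`namespace` kwargs and
# their example values are assumptions, not taken from the generated model:
#model = V1alpha1DataVolumeSourcePVC(name="source-pvc", namespace="default")
#self.assertEqual(model.name, "source-pvc")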
pass
if __name__ == '__main__':
unittest.main()
|
TestV1alpha1DataVolumeSourcePVC
|
bootstrap.go
|
// Copyright © 2019 VMware
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package envoy contains APIs for translating between Contour
// objects and Envoy configuration APIs and types.
package envoy
import (
"fmt"
"os"
"path"
"strconv"
"strings"
"time"
api "github.com/envoyproxy/go-control-plane/envoy/api/v2"
envoy_api_v2_auth "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth"
clusterv2 "github.com/envoyproxy/go-control-plane/envoy/api/v2/cluster"
envoy_api_v2_core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
envoy_api_bootstrap "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v2"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
"github.com/projectcontour/contour/internal/protobuf"
)
// sdsResourcesSubdirectory stores the subdirectory name where SDS path resources are stored.
const sdsResourcesSubdirectory = "sds"
// sdsTLSCertificateFile stores the path to the SDS resource with Envoy's
// client certificate and key for the xDS gRPC connection.
const sdsTLSCertificateFile = "xds-tls-certificate.json"
// sdsValidationContextFile stores the path to the SDS resource with
// CA certificates for Envoy to use for the xDS gRPC connection.
const sdsValidationContextFile = "xds-validation-context.json"
// WriteBootstrap writes bootstrap configuration to files.
func WriteBootstrap(c *BootstrapConfig) error {
// Create Envoy bootstrap config and associated resource files.
steps, err := bootstrap(c)
if err != nil {
return err
}
if c.ResourcesDir != "" {
		if err := os.MkdirAll(path.Join(c.ResourcesDir, sdsResourcesSubdirectory), 0750); err != nil {
return err
}
}
// Write all configuration files out to filesystem.
for _, step := range steps {
if err := writeConfig(step(c)); err != nil {
return err
}
}
return nil
}
type bootstrapf func(*BootstrapConfig) (string, proto.Message)
// bootstrap creates a new v2 bootstrap configuration and associated resource files.
func bootstrap(c *BootstrapConfig) ([]bootstrapf, error) {
steps := []bootstrapf{}
if c.GrpcClientCert == "" && c.GrpcClientKey == "" && c.GrpcCABundle == "" {
steps = append(steps,
func(*BootstrapConfig) (string, proto.Message) {
return c.Path, bootstrapConfig(c)
})
return steps, nil
}
	// If any of the TLS options is set, all of them must be set.
if c.GrpcClientCert == "" || c.GrpcClientKey == "" || c.GrpcCABundle == "" {
return nil, fmt.Errorf("you must supply all TLS parameters - %q, %q, %q, or none of them",
"--envoy-cafile", "--envoy-cert-file", "--envoy-key-file")
}
if c.ResourcesDir == "" {
|
// of xDS certificate files in this case.
steps = append(steps,
func(*BootstrapConfig) (string, proto.Message) {
b := bootstrapConfig(c)
b.StaticResources.Clusters[0].TransportSocket = UpstreamTLSTransportSocket(
upstreamFileTLSContext(c))
return c.Path, b
})
return steps, nil
}
// xDS certificate rotation is supported by Envoy by using SDS path based resource files.
	// These files are JSON representations of the SDS protobuf messages that normally get sent over the xDS connection,
	// but for the xDS connection itself, bootstrapping is done by storing the SDS resources in the local filesystem.
	// Envoy will monitor and reload the resource files, as well as the certificate and key files referred to by the SDS resources.
//
// Two files are written to ResourcesDir:
// - SDS resource for xDS client certificate and key for authenticating Envoy towards Contour.
// - SDS resource for trusted CA certificate for validating Contour server certificate.
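	//
	// As a sketch, the TLS certificate resource written below looks roughly like this,
	// with /certs/tls.crt and /certs/tls.key standing in for the configured file paths
	// (illustrative only; the exact output is produced by jsonpb in writeConfig):
	//
	//   {
	//     "resources": [{
	//       "@type": "type.googleapis.com/envoy.api.v2.auth.Secret",
	//       "tls_certificate": {
	//         "certificate_chain": {"filename": "/certs/tls.crt"},
	//         "private_key": {"filename": "/certs/tls.key"}
	//       }
	//     }]
	//   }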
sdsTLSCertificatePath := path.Join(c.ResourcesDir, sdsResourcesSubdirectory, sdsTLSCertificateFile)
sdsValidationContextPath := path.Join(c.ResourcesDir, sdsResourcesSubdirectory, sdsValidationContextFile)
steps = append(steps,
func(*BootstrapConfig) (string, proto.Message) {
return sdsTLSCertificatePath, tlsCertificateSdsSecretConfig(c)
},
func(*BootstrapConfig) (string, proto.Message) {
return sdsValidationContextPath, validationContextSdsSecretConfig(c)
},
func(*BootstrapConfig) (string, proto.Message) {
b := bootstrapConfig(c)
b.StaticResources.Clusters[0].TransportSocket = UpstreamTLSTransportSocket(
upstreamSdsTLSContext(sdsTLSCertificatePath, sdsValidationContextPath))
return c.Path, b
},
)
return steps, nil
}
func bootstrapConfig(c *BootstrapConfig) *envoy_api_bootstrap.Bootstrap {
return &envoy_api_bootstrap.Bootstrap{
DynamicResources: &envoy_api_bootstrap.Bootstrap_DynamicResources{
LdsConfig: ConfigSource("contour"),
CdsConfig: ConfigSource("contour"),
},
StaticResources: &envoy_api_bootstrap.Bootstrap_StaticResources{
Clusters: []*api.Cluster{{
Name: "contour",
AltStatName: strings.Join([]string{c.Namespace, "contour", strconv.Itoa(c.xdsGRPCPort())}, "_"),
ConnectTimeout: protobuf.Duration(5 * time.Second),
ClusterDiscoveryType: ClusterDiscoveryType(api.Cluster_STRICT_DNS),
LbPolicy: api.Cluster_ROUND_ROBIN,
LoadAssignment: &api.ClusterLoadAssignment{
ClusterName: "contour",
Endpoints: Endpoints(
SocketAddress(c.xdsAddress(), c.xdsGRPCPort()),
),
},
UpstreamConnectionOptions: &api.UpstreamConnectionOptions{
TcpKeepalive: &envoy_api_v2_core.TcpKeepalive{
KeepaliveProbes: protobuf.UInt32(3),
KeepaliveTime: protobuf.UInt32(30),
KeepaliveInterval: protobuf.UInt32(5),
},
},
Http2ProtocolOptions: new(envoy_api_v2_core.Http2ProtocolOptions), // enables http2
CircuitBreakers: &clusterv2.CircuitBreakers{
Thresholds: []*clusterv2.CircuitBreakers_Thresholds{{
Priority: envoy_api_v2_core.RoutingPriority_HIGH,
MaxConnections: protobuf.UInt32(100000),
MaxPendingRequests: protobuf.UInt32(100000),
MaxRequests: protobuf.UInt32(60000000),
MaxRetries: protobuf.UInt32(50),
}, {
Priority: envoy_api_v2_core.RoutingPriority_DEFAULT,
MaxConnections: protobuf.UInt32(100000),
MaxPendingRequests: protobuf.UInt32(100000),
MaxRequests: protobuf.UInt32(60000000),
MaxRetries: protobuf.UInt32(50),
}},
},
}, {
Name: "service-stats",
AltStatName: strings.Join([]string{c.Namespace, "service-stats", strconv.Itoa(c.adminPort())}, "_"),
ConnectTimeout: protobuf.Duration(250 * time.Millisecond),
ClusterDiscoveryType: ClusterDiscoveryType(api.Cluster_LOGICAL_DNS),
LbPolicy: api.Cluster_ROUND_ROBIN,
LoadAssignment: &api.ClusterLoadAssignment{
ClusterName: "service-stats",
Endpoints: Endpoints(
SocketAddress(c.adminAddress(), c.adminPort()),
),
},
}},
},
Admin: &envoy_api_bootstrap.Admin{
AccessLogPath: c.adminAccessLogPath(),
Address: SocketAddress(c.adminAddress(), c.adminPort()),
},
}
}
func upstreamFileTLSContext(c *BootstrapConfig) *envoy_api_v2_auth.UpstreamTlsContext {
context := &envoy_api_v2_auth.UpstreamTlsContext{
CommonTlsContext: &envoy_api_v2_auth.CommonTlsContext{
TlsCertificates: []*envoy_api_v2_auth.TlsCertificate{{
CertificateChain: &envoy_api_v2_core.DataSource{
Specifier: &envoy_api_v2_core.DataSource_Filename{
Filename: c.GrpcClientCert,
},
},
PrivateKey: &envoy_api_v2_core.DataSource{
Specifier: &envoy_api_v2_core.DataSource_Filename{
Filename: c.GrpcClientKey,
},
},
}},
ValidationContextType: &envoy_api_v2_auth.CommonTlsContext_ValidationContext{
ValidationContext: &envoy_api_v2_auth.CertificateValidationContext{
TrustedCa: &envoy_api_v2_core.DataSource{
Specifier: &envoy_api_v2_core.DataSource_Filename{
Filename: c.GrpcCABundle,
},
},
// TODO(youngnick): Does there need to be a flag wired down to here?
MatchSubjectAltNames: []*matcher.StringMatcher{{
MatchPattern: &matcher.StringMatcher_Exact{
Exact: "contour",
}},
},
},
},
},
}
return context
}
func upstreamSdsTLSContext(certificateSdsFile, validationSdsFile string) *envoy_api_v2_auth.UpstreamTlsContext {
context := &envoy_api_v2_auth.UpstreamTlsContext{
CommonTlsContext: &envoy_api_v2_auth.CommonTlsContext{
TlsCertificateSdsSecretConfigs: []*envoy_api_v2_auth.SdsSecretConfig{{
SdsConfig: &envoy_api_v2_core.ConfigSource{
ConfigSourceSpecifier: &envoy_api_v2_core.ConfigSource_Path{
Path: certificateSdsFile,
},
},
}},
ValidationContextType: &envoy_api_v2_auth.CommonTlsContext_ValidationContextSdsSecretConfig{
ValidationContextSdsSecretConfig: &envoy_api_v2_auth.SdsSecretConfig{
SdsConfig: &envoy_api_v2_core.ConfigSource{
ConfigSourceSpecifier: &envoy_api_v2_core.ConfigSource_Path{
Path: validationSdsFile,
},
},
},
},
},
}
return context
}
// tlsCertificateSdsSecretConfig creates a DiscoveryResponse with a file-based SDS resource
// including paths to the TLS certificate and key.
func tlsCertificateSdsSecretConfig(c *BootstrapConfig) *api.DiscoveryResponse {
secret := &envoy_api_v2_auth.Secret{
Type: &envoy_api_v2_auth.Secret_TlsCertificate{
TlsCertificate: &envoy_api_v2_auth.TlsCertificate{
CertificateChain: &envoy_api_v2_core.DataSource{
Specifier: &envoy_api_v2_core.DataSource_Filename{
Filename: c.GrpcClientCert,
},
},
PrivateKey: &envoy_api_v2_core.DataSource{
Specifier: &envoy_api_v2_core.DataSource_Filename{
Filename: c.GrpcClientKey,
},
},
},
},
}
return &api.DiscoveryResponse{
Resources: []*any.Any{protobuf.MustMarshalAny(secret)},
}
}
// validationContextSdsSecretConfig creates a DiscoveryResponse with a file-based SDS resource
// including the path to the CA certificate bundle.
func validationContextSdsSecretConfig(c *BootstrapConfig) *api.DiscoveryResponse {
secret := &envoy_api_v2_auth.Secret{
Type: &envoy_api_v2_auth.Secret_ValidationContext{
ValidationContext: &envoy_api_v2_auth.CertificateValidationContext{
TrustedCa: &envoy_api_v2_core.DataSource{
Specifier: &envoy_api_v2_core.DataSource_Filename{
Filename: c.GrpcCABundle,
},
},
MatchSubjectAltNames: []*matcher.StringMatcher{{
MatchPattern: &matcher.StringMatcher_Exact{
Exact: "contour",
}},
},
},
},
}
return &api.DiscoveryResponse{
Resources: []*any.Any{protobuf.MustMarshalAny(secret)},
}
}
// BootstrapConfig holds configuration values for a v2.Bootstrap.
type BootstrapConfig struct {
// AdminAccessLogPath is the path to write the access log for the administration server.
// Defaults to /dev/null.
AdminAccessLogPath string
// AdminAddress is the TCP address that the administration server will listen on.
// Defaults to 127.0.0.1.
AdminAddress string
// AdminPort is the port that the administration server will listen on.
// Defaults to 9001.
AdminPort int
// XDSAddress is the TCP address of the gRPC XDS management server.
// Defaults to 127.0.0.1.
XDSAddress string
// XDSGRPCPort is the management server port that provides the v2 gRPC API.
// Defaults to 8001.
XDSGRPCPort int
// Namespace is the namespace where Contour is running
Namespace string
	// GrpcCABundle is the filename that contains a CA certificate chain that can
	// verify the client cert.
GrpcCABundle string
// GrpcClientCert is the filename that contains a client certificate. May contain a full bundle if you
// don't want to pass a CA Bundle.
GrpcClientCert string
// GrpcClientKey is the filename that contains a client key for secure gRPC with TLS.
GrpcClientKey string
// Path is the filename for the bootstrap configuration file to be created.
Path string
// ResourcesDir is the directory where out of line Envoy resources can be placed.
ResourcesDir string
}
func (c *BootstrapConfig) xdsAddress() string { return stringOrDefault(c.XDSAddress, "127.0.0.1") }
func (c *BootstrapConfig) xdsGRPCPort() int { return intOrDefault(c.XDSGRPCPort, 8001) }
func (c *BootstrapConfig) adminAddress() string { return stringOrDefault(c.AdminAddress, "127.0.0.1") }
func (c *BootstrapConfig) adminPort() int { return intOrDefault(c.AdminPort, 9001) }
func (c *BootstrapConfig) adminAccessLogPath() string {
return stringOrDefault(c.AdminAccessLogPath, "/dev/null")
}
func stringOrDefault(s, def string) string {
if s == "" {
return def
}
return s
}
func intOrDefault(i, def int) int {
if i == 0 {
return def
}
return i
}
func writeConfig(filename string, config proto.Message) (err error) {
var out *os.File
if filename == "-" {
out = os.Stdout
} else {
out, err = os.Create(filename)
if err != nil {
return
}
		defer func() {
			// Don't let a successful Close mask an earlier Marshal error.
			if cerr := out.Close(); err == nil {
				err = cerr
			}
		}()
}
m := &jsonpb.Marshaler{OrigName: true}
return m.Marshal(out, config)
}
|
// For backwards compatibility, the old behavior
// is to use direct certificate and key file paths in
// bootstrap config. Envoy does not support rotation
|
float_ord.rs
|
use crate::bytes_of;
use std::{
cmp::Ordering,
hash::{Hash, Hasher},
ops::Neg,
};
/// A wrapper type that enables ordering floats. This is a workaround for the famous "rust float
/// ordering" problem. By using it, you acknowledge that sorting NaN is undefined according to spec.
/// This implementation treats NaN as the "smallest" float.
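///
/// A minimal illustration of the resulting order, assuming `FloatOrd` is in scope:
///
/// ```ignore
/// let mut v = vec![FloatOrd(1.0), FloatOrd(f32::NAN), FloatOrd(-1.0)];
/// v.sort(); // NaN sorts first: [NaN, -1.0, 1.0]
/// ```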
#[derive(Debug, Copy, Clone, PartialOrd)]
pub struct
|
(pub f32);
#[allow(clippy::derive_ord_xor_partial_ord)]
impl Ord for FloatOrd {
fn cmp(&self, other: &Self) -> Ordering {
self.0.partial_cmp(&other.0).unwrap_or_else(|| {
if self.0.is_nan() && !other.0.is_nan() {
Ordering::Less
} else if !self.0.is_nan() && other.0.is_nan() {
Ordering::Greater
} else {
Ordering::Equal
}
})
}
}
impl PartialEq for FloatOrd {
fn eq(&self, other: &Self) -> bool {
if self.0.is_nan() && other.0.is_nan() {
true
} else {
self.0 == other.0
}
}
}
impl Eq for FloatOrd {}
impl Hash for FloatOrd {
fn hash<H: Hasher>(&self, state: &mut H) {
if self.0.is_nan() {
// Ensure all NaN representations hash to the same value
state.write(bytes_of(&f32::NAN))
} else if self.0 == 0.0 {
// Ensure both zeroes hash to the same value
state.write(bytes_of(&0.0f32))
} else {
state.write(bytes_of(&self.0));
}
}
}
impl Neg for FloatOrd {
type Output = FloatOrd;
fn neg(self) -> Self::Output {
FloatOrd(-self.0)
}
}
|
FloatOrd
|
tutorial.rs
|
// Autogenerated by Thrift Compiler ()
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#![allow(unused_imports)]
#![allow(unused_extern_crates)]
#![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments, type_complexity))]
#![cfg_attr(rustfmt, rustfmt_skip)]
extern crate async_thrift;
use std::cell::RefCell;
use std::collections::{BTreeMap, BTreeSet};
use std::convert::{From, TryFrom};
use std::default::Default;
use std::error::Error;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::rc::Rc;
use async_trait::async_trait;
use async_thrift::{ApplicationError, ApplicationErrorKind, ProtocolError, ProtocolErrorKind, TThriftClient};
use async_thrift::OrderedFloat;
use async_thrift::protocol::{TAsyncInputProtocol, TAsyncOutputProtocol, TFieldIdentifier, TListIdentifier, TMapIdentifier, TMessageIdentifier, TMessageType, TSetIdentifier, TStructIdentifier, TType};
use async_thrift::protocol::field_id;
use async_thrift::protocol::verify_expected_message_type;
use async_thrift::protocol::verify_expected_sequence_number;
use async_thrift::protocol::verify_expected_service_call;
use async_thrift::protocol::verify_required_field_exists;
use async_thrift::server::TAsyncProcessor;
//
// Calculator service client
//
#[async_trait]
pub trait TCalculatorSyncClient {
async fn ping(&mut self) -> async_thrift::Result<()>;
}
pub trait TCalculatorSyncClientMarker {}
pub struct CalculatorSyncClient<IP, OP> where IP: TAsyncInputProtocol, OP: TAsyncOutputProtocol {
_i_prot: IP,
_o_prot: OP,
_sequence_number: i32,
}
impl<IP, OP> CalculatorSyncClient<IP, OP> where IP: TAsyncInputProtocol, OP: TAsyncOutputProtocol {
pub fn new(input_protocol: IP, output_protocol: OP) -> CalculatorSyncClient<IP, OP>
|
}
impl<IP, OP> TThriftClient for CalculatorSyncClient<IP, OP> where IP: TAsyncInputProtocol, OP: TAsyncOutputProtocol {
fn i_prot_mut(&mut self) -> &mut (dyn TAsyncInputProtocol + Send) { &mut self._i_prot }
fn o_prot_mut(&mut self) -> &mut (dyn TAsyncOutputProtocol + Send) { &mut self._o_prot }
fn sequence_number(&self) -> i32 { self._sequence_number }
fn increment_sequence_number(&mut self) -> i32 {
self._sequence_number += 1;
self._sequence_number
}
}
impl<IP, OP> TCalculatorSyncClientMarker for CalculatorSyncClient<IP, OP> where IP: TAsyncInputProtocol, OP: TAsyncOutputProtocol {}
#[async_trait]
impl<C: TThriftClient + TCalculatorSyncClientMarker + Send> TCalculatorSyncClient for C {
async fn ping(&mut self) -> async_thrift::Result<()> {
(
{
self.increment_sequence_number();
let message_ident = TMessageIdentifier::new("ping", TMessageType::Call, self.sequence_number());
let call_args = CalculatorPingArgs {};
self.o_prot_mut().write_message_begin(&message_ident).await?;
call_args.write_to_out_protocol(self.o_prot_mut()).await?;
self.o_prot_mut().write_message_end().await?;
self.o_prot_mut().flush().await
}
)?;
{
let message_ident = self.i_prot_mut().read_message_begin().await?;
verify_expected_sequence_number(self.sequence_number(), message_ident.sequence_number)?;
verify_expected_service_call("ping", &message_ident.name)?;
if message_ident.message_type == TMessageType::Exception {
let remote_error = async_thrift::Error::read_application_error_from_in_protocol(self.i_prot_mut()).await?;
self.i_prot_mut().read_message_end().await?;
return Err(async_thrift::Error::Application(remote_error));
}
verify_expected_message_type(TMessageType::Reply, message_ident.message_type)?;
let result = CalculatorPingResult::read_from_in_protocol(self.i_prot_mut()).await?;
self.i_prot_mut().read_message_end().await?;
result.ok_or()
}
}
}
//
// Calculator service processor
//
#[async_trait]
pub trait CalculatorSyncHandler {
async fn handle_ping(&self) -> async_thrift::Result<()>;
}
pub struct CalculatorSyncProcessor<H: CalculatorSyncHandler> {
handler: H,
}
impl<H: CalculatorSyncHandler> CalculatorSyncProcessor<H> {
pub fn new(handler: H) -> CalculatorSyncProcessor<H> {
CalculatorSyncProcessor {
handler,
}
}
async fn process_ping(&self, incoming_sequence_number: i32, i_prot: &mut (dyn TAsyncInputProtocol + Send), o_prot: &mut (dyn TAsyncOutputProtocol + Send)) -> async_thrift::Result<()> {
TCalculatorProcessFunctions::process_ping(&self.handler, incoming_sequence_number, i_prot, o_prot).await
}
}
pub struct TCalculatorProcessFunctions;
impl TCalculatorProcessFunctions {
pub async fn process_ping<H: CalculatorSyncHandler>(handler: &H, incoming_sequence_number: i32, i_prot: &mut (dyn TAsyncInputProtocol + Send), o_prot: &mut (dyn TAsyncOutputProtocol + Send)) -> async_thrift::Result<()> {
let _ = CalculatorPingArgs::read_from_in_protocol(i_prot).await?;
match handler.handle_ping().await {
Ok(_) => {
let message_ident = TMessageIdentifier::new("ping", TMessageType::Reply, incoming_sequence_number);
o_prot.write_message_begin(&message_ident).await?;
let ret = CalculatorPingResult {};
ret.write_to_out_protocol(o_prot).await?;
o_prot.write_message_end().await?;
o_prot.flush().await
}
Err(e) => {
match e {
async_thrift::Error::Application(app_err) => {
let message_ident = TMessageIdentifier::new("ping", TMessageType::Exception, incoming_sequence_number);
o_prot.write_message_begin(&message_ident).await?;
async_thrift::Error::write_application_error_to_out_protocol(&app_err, o_prot).await?;
o_prot.write_message_end().await?;
o_prot.flush().await
}
_ => {
let ret_err = {
ApplicationError::new(
ApplicationErrorKind::Unknown,
e.description(),
)
};
let message_ident = TMessageIdentifier::new("ping", TMessageType::Exception, incoming_sequence_number);
o_prot.write_message_begin(&message_ident).await?;
async_thrift::Error::write_application_error_to_out_protocol(&ret_err, o_prot).await?;
o_prot.write_message_end().await?;
o_prot.flush().await
}
}
}
}
}
}
#[async_trait]
impl<H: CalculatorSyncHandler + Send + Sync> TAsyncProcessor for CalculatorSyncProcessor<H> {
async fn process(&self, i_prot: &mut (dyn TAsyncInputProtocol + Send), o_prot: &mut (dyn TAsyncOutputProtocol + Send)) -> async_thrift::Result<()> {
let message_ident = i_prot.read_message_begin().await?;
let res = match &*message_ident.name {
"ping" => {
self.process_ping(message_ident.sequence_number, i_prot, o_prot).await
}
method => {
Err(
async_thrift::Error::Application(
ApplicationError::new(
ApplicationErrorKind::UnknownMethod,
format!("unknown method {}", method),
)
)
)
}
};
async_thrift::server::handle_process_result(&message_ident, res, o_prot).await
}
}
//
// CalculatorPingArgs
//
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
struct CalculatorPingArgs {}
impl CalculatorPingArgs {
async fn read_from_in_protocol(i_prot: &mut (dyn TAsyncInputProtocol + Send)) -> async_thrift::Result<CalculatorPingArgs> {
i_prot.read_struct_begin().await?;
loop {
let field_ident = i_prot.read_field_begin().await?;
if field_ident.field_type == TType::Stop {
break;
}
let field_id = field_id(&field_ident)?;
match field_id {
_ => {
i_prot.skip(field_ident.field_type).await?;
}
};
i_prot.read_field_end().await?;
}
i_prot.read_struct_end().await?;
let ret = CalculatorPingArgs {};
Ok(ret)
}
async fn write_to_out_protocol(&self, o_prot: &mut (dyn TAsyncOutputProtocol + Send)) -> async_thrift::Result<()> {
let struct_ident = TStructIdentifier::new("ping_args");
o_prot.write_struct_begin(&struct_ident).await?;
o_prot.write_field_stop().await?;
o_prot.write_struct_end().await
}
}
//
// CalculatorPingResult
//
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
struct CalculatorPingResult {}
impl CalculatorPingResult {
async fn read_from_in_protocol(i_prot: &mut (dyn TAsyncInputProtocol + Send)) -> async_thrift::Result<CalculatorPingResult> {
i_prot.read_struct_begin().await?;
loop {
let field_ident = i_prot.read_field_begin().await?;
if field_ident.field_type == TType::Stop {
break;
}
let field_id = field_id(&field_ident)?;
match field_id {
_ => {
i_prot.skip(field_ident.field_type).await?;
}
};
i_prot.read_field_end().await?;
}
i_prot.read_struct_end().await?;
let ret = CalculatorPingResult {};
Ok(ret)
}
async fn write_to_out_protocol(&self, o_prot: &mut (dyn TAsyncOutputProtocol + Send)) -> async_thrift::Result<()> {
let struct_ident = TStructIdentifier::new("CalculatorPingResult");
o_prot.write_struct_begin(&struct_ident).await?;
o_prot.write_field_stop().await?;
o_prot.write_struct_end().await
}
fn ok_or(self) -> async_thrift::Result<()> {
Ok(())
}
}
|
{
CalculatorSyncClient { _i_prot: input_protocol, _o_prot: output_protocol, _sequence_number: 0 }
}
|
_inputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AutoScalingConfigurationAutoScalingResourcesArgs',
'AutoScalingConfigurationPolicyArgs',
'AutoScalingConfigurationPolicyCapacityArgs',
'AutoScalingConfigurationPolicyExecutionScheduleArgs',
'AutoScalingConfigurationPolicyResourceActionArgs',
'AutoScalingConfigurationPolicyRuleArgs',
'AutoScalingConfigurationPolicyRuleActionArgs',
'AutoScalingConfigurationPolicyRuleMetricArgs',
'AutoScalingConfigurationPolicyRuleMetricThresholdArgs',
'GetAutoScalingConfigurationsFilterArgs',
]
@pulumi.input_type
class AutoScalingConfigurationAutoScalingResourcesArgs:
def __init__(__self__, *,
id: pulumi.Input[str],
type: pulumi.Input[str]):
"""
:param pulumi.Input[str] id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the resource that is managed by the autoscaling configuration.
:param pulumi.Input[str] type: The type of action to take.
"""
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the resource that is managed by the autoscaling configuration.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
|
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class AutoScalingConfigurationPolicyArgs:
def __init__(__self__, *,
policy_type: pulumi.Input[str],
capacity: Optional[pulumi.Input['AutoScalingConfigurationPolicyCapacityArgs']] = None,
display_name: Optional[pulumi.Input[str]] = None,
execution_schedule: Optional[pulumi.Input['AutoScalingConfigurationPolicyExecutionScheduleArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None,
resource_action: Optional[pulumi.Input['AutoScalingConfigurationPolicyResourceActionArgs']] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['AutoScalingConfigurationPolicyRuleArgs']]]] = None,
time_created: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] policy_type: The type of autoscaling policy.
:param pulumi.Input['AutoScalingConfigurationPolicyCapacityArgs'] capacity: The capacity requirements of the autoscaling policy.
:param pulumi.Input[str] display_name: A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:param pulumi.Input['AutoScalingConfigurationPolicyExecutionScheduleArgs'] execution_schedule: An execution schedule for an autoscaling policy.
:param pulumi.Input[str] id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the resource that is managed by the autoscaling configuration.
:param pulumi.Input[bool] is_enabled: Whether the autoscaling policy is enabled.
:param pulumi.Input['AutoScalingConfigurationPolicyResourceActionArgs'] resource_action: An action that can be executed against a resource.
:param pulumi.Input[str] time_created: The date and time the autoscaling configuration was created, in the format defined by RFC3339. Example: `2016-08-25T21:10:29.600Z`
"""
pulumi.set(__self__, "policy_type", policy_type)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if execution_schedule is not None:
pulumi.set(__self__, "execution_schedule", execution_schedule)
if id is not None:
pulumi.set(__self__, "id", id)
if is_enabled is not None:
pulumi.set(__self__, "is_enabled", is_enabled)
if resource_action is not None:
pulumi.set(__self__, "resource_action", resource_action)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> pulumi.Input[str]:
"""
The type of autoscaling policy.
"""
return pulumi.get(self, "policy_type")
@policy_type.setter
def policy_type(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_type", value)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input['AutoScalingConfigurationPolicyCapacityArgs']]:
"""
The capacity requirements of the autoscaling policy.
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input['AutoScalingConfigurationPolicyCapacityArgs']]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="executionSchedule")
def execution_schedule(self) -> Optional[pulumi.Input['AutoScalingConfigurationPolicyExecutionScheduleArgs']]:
"""
An execution schedule for an autoscaling policy.
"""
return pulumi.get(self, "execution_schedule")
@execution_schedule.setter
def execution_schedule(self, value: Optional[pulumi.Input['AutoScalingConfigurationPolicyExecutionScheduleArgs']]):
pulumi.set(self, "execution_schedule", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the resource that is managed by the autoscaling configuration.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the autoscaling policy is enabled.
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="resourceAction")
def resource_action(self) -> Optional[pulumi.Input['AutoScalingConfigurationPolicyResourceActionArgs']]:
"""
An action that can be executed against a resource.
"""
return pulumi.get(self, "resource_action")
@resource_action.setter
def resource_action(self, value: Optional[pulumi.Input['AutoScalingConfigurationPolicyResourceActionArgs']]):
pulumi.set(self, "resource_action", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AutoScalingConfigurationPolicyRuleArgs']]]]:
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AutoScalingConfigurationPolicyRuleArgs']]]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> Optional[pulumi.Input[str]]:
"""
The date and time the autoscaling configuration was created, in the format defined by RFC3339. Example: `2016-08-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
@time_created.setter
def time_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_created", value)
@pulumi.input_type
class AutoScalingConfigurationPolicyCapacityArgs:
def __init__(__self__, *,
initial: Optional[pulumi.Input[int]] = None,
max: Optional[pulumi.Input[int]] = None,
min: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] initial: For a threshold-based autoscaling policy, this value is the initial number of instances to launch in the instance pool immediately after autoscaling is enabled. After autoscaling retrieves performance metrics, the number of instances is automatically adjusted from this initial number to a number that is based on the limits that you set.
:param pulumi.Input[int] max: For a threshold-based autoscaling policy, this value is the maximum number of instances the instance pool is allowed to increase to (scale out).
:param pulumi.Input[int] min: For a threshold-based autoscaling policy, this value is the minimum number of instances the instance pool is allowed to decrease to (scale in).
"""
if initial is not None:
pulumi.set(__self__, "initial", initial)
if max is not None:
pulumi.set(__self__, "max", max)
if min is not None:
pulumi.set(__self__, "min", min)
@property
@pulumi.getter
def initial(self) -> Optional[pulumi.Input[int]]:
"""
For a threshold-based autoscaling policy, this value is the initial number of instances to launch in the instance pool immediately after autoscaling is enabled. After autoscaling retrieves performance metrics, the number of instances is automatically adjusted from this initial number to a number that is based on the limits that you set.
"""
return pulumi.get(self, "initial")
@initial.setter
def initial(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial", value)
@property
@pulumi.getter
def max(self) -> Optional[pulumi.Input[int]]:
"""
For a threshold-based autoscaling policy, this value is the maximum number of instances the instance pool is allowed to increase to (scale out).
"""
return pulumi.get(self, "max")
@max.setter
def max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max", value)
@property
@pulumi.getter
def min(self) -> Optional[pulumi.Input[int]]:
"""
For a threshold-based autoscaling policy, this value is the minimum number of instances the instance pool is allowed to decrease to (scale in).
"""
return pulumi.get(self, "min")
@min.setter
def min(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min", value)
@pulumi.input_type
class AutoScalingConfigurationPolicyExecutionScheduleArgs:
def __init__(__self__, *,
expression: pulumi.Input[str],
timezone: pulumi.Input[str],
type: pulumi.Input[str]):
"""
:param pulumi.Input[str] expression: A cron expression that represents the time at which to execute the autoscaling policy.
:param pulumi.Input[str] timezone: The time zone for the execution schedule.
:param pulumi.Input[str] type: The type of action to take.
"""
pulumi.set(__self__, "expression", expression)
pulumi.set(__self__, "timezone", timezone)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def expression(self) -> pulumi.Input[str]:
"""
A cron expression that represents the time at which to execute the autoscaling policy.
"""
return pulumi.get(self, "expression")
@expression.setter
def expression(self, value: pulumi.Input[str]):
pulumi.set(self, "expression", value)
@property
@pulumi.getter
def timezone(self) -> pulumi.Input[str]:
"""
The time zone for the execution schedule.
"""
return pulumi.get(self, "timezone")
@timezone.setter
def timezone(self, value: pulumi.Input[str]):
pulumi.set(self, "timezone", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The type of action to take.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class AutoScalingConfigurationPolicyResourceActionArgs:
def __init__(__self__, *,
action: pulumi.Input[str],
action_type: pulumi.Input[str]):
"""
:param pulumi.Input[str] action: The action to take when autoscaling is triggered.
:param pulumi.Input[str] action_type: The type of resource action.
"""
pulumi.set(__self__, "action", action)
pulumi.set(__self__, "action_type", action_type)
@property
@pulumi.getter
def action(self) -> pulumi.Input[str]:
"""
The action to take when autoscaling is triggered.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: pulumi.Input[str]):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="actionType")
def action_type(self) -> pulumi.Input[str]:
"""
The type of resource action.
"""
return pulumi.get(self, "action_type")
@action_type.setter
def action_type(self, value: pulumi.Input[str]):
pulumi.set(self, "action_type", value)
@pulumi.input_type
class AutoScalingConfigurationPolicyRuleArgs:
def __init__(__self__, *,
display_name: pulumi.Input[str],
action: Optional[pulumi.Input['AutoScalingConfigurationPolicyRuleActionArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
metric: Optional[pulumi.Input['AutoScalingConfigurationPolicyRuleMetricArgs']] = None):
"""
:param pulumi.Input[str] display_name: A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:param pulumi.Input['AutoScalingConfigurationPolicyRuleActionArgs'] action: The action to take when autoscaling is triggered.
:param pulumi.Input[str] id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the resource that is managed by the autoscaling configuration.
:param pulumi.Input['AutoScalingConfigurationPolicyRuleMetricArgs'] metric: Metric and threshold details for triggering an autoscaling action.
"""
pulumi.set(__self__, "display_name", display_name)
if action is not None:
pulumi.set(__self__, "action", action)
if id is not None:
pulumi.set(__self__, "id", id)
if metric is not None:
pulumi.set(__self__, "metric", metric)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input['AutoScalingConfigurationPolicyRuleActionArgs']]:
"""
The action to take when autoscaling is triggered.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input['AutoScalingConfigurationPolicyRuleActionArgs']]):
pulumi.set(self, "action", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the resource that is managed by the autoscaling configuration.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def metric(self) -> Optional[pulumi.Input['AutoScalingConfigurationPolicyRuleMetricArgs']]:
"""
Metric and threshold details for triggering an autoscaling action.
"""
return pulumi.get(self, "metric")
@metric.setter
def metric(self, value: Optional[pulumi.Input['AutoScalingConfigurationPolicyRuleMetricArgs']]):
pulumi.set(self, "metric", value)
@pulumi.input_type
class AutoScalingConfigurationPolicyRuleActionArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] type: The type of action to take.
"""
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of action to take.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class AutoScalingConfigurationPolicyRuleMetricArgs:
def __init__(__self__, *,
metric_type: Optional[pulumi.Input[str]] = None,
threshold: Optional[pulumi.Input['AutoScalingConfigurationPolicyRuleMetricThresholdArgs']] = None):
if metric_type is not None:
pulumi.set(__self__, "metric_type", metric_type)
if threshold is not None:
pulumi.set(__self__, "threshold", threshold)
@property
@pulumi.getter(name="metricType")
def metric_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "metric_type")
@metric_type.setter
def metric_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_type", value)
@property
@pulumi.getter
def threshold(self) -> Optional[pulumi.Input['AutoScalingConfigurationPolicyRuleMetricThresholdArgs']]:
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: Optional[pulumi.Input['AutoScalingConfigurationPolicyRuleMetricThresholdArgs']]):
pulumi.set(self, "threshold", value)
@pulumi.input_type
class AutoScalingConfigurationPolicyRuleMetricThresholdArgs:
def __init__(__self__, *,
operator: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] operator: The comparison operator to use. Options are greater than (`GT`), greater than or equal to (`GTE`), less than (`LT`), and less than or equal to (`LTE`).
"""
if operator is not None:
pulumi.set(__self__, "operator", operator)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def operator(self) -> Optional[pulumi.Input[str]]:
"""
The comparison operator to use. Options are greater than (`GT`), greater than or equal to (`GTE`), less than (`LT`), and less than or equal to (`LTE`).
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class GetAutoScalingConfigurationsFilterArgs:
def __init__(__self__, *,
name: str,
values: Sequence[str],
regex: Optional[bool] = None):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
if regex is not None:
pulumi.set(__self__, "regex", regex)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@name.setter
def name(self, value: str):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def values(self) -> Sequence[str]:
return pulumi.get(self, "values")
@values.setter
def values(self, value: Sequence[str]):
pulumi.set(self, "values", value)
@property
@pulumi.getter
def regex(self) -> Optional[bool]:
return pulumi.get(self, "regex")
@regex.setter
def regex(self, value: Optional[bool]):
pulumi.set(self, "regex", value)
|
"""
The type of action to take.
"""
return pulumi.get(self, "type")
|
7fb7364b821a_add_username_to_loadtbl.py
|
"""add username to LoadTbl
Revision ID: 7fb7364b821a
Revises: 090128c02529
Create Date: 2018-10-24 17:10:03.781293
"""
# revision identifiers, used by Alembic.
revision = '7fb7364b821a'
down_revision = '090128c02529'
import sqlalchemy as sa
from alembic import op
def upgrade():
|
def downgrade():
op.drop_constraint('LoadTbl_ibfk_2', 'LoadTbl', type_='foreignkey')
op.drop_column('LoadTbl', 'username')
|
op.add_column('LoadTbl', sa.Column('username', sa.String(45)))
op.create_foreign_key('LoadTbl_ibfk_2', 'LoadTbl', 'UserTbl', ['username'], ['name'])
|
console.py
|
class
|
:
def __init__(self):
pass
def add(self, a, b):
return a + b
    def divide(self, a, b):
        return a / b
# Todo: Add subtract option
# def root(a):
# return math.sqrt()
def greetings(name):
print('Hello ' + name + '!')
def goodbye():
print('Goodbye!')
myCalculator = Calculator()
print(myCalculator.add(2, 3))  # subtract() is not implemented yet; see the Todo above
# execfile('console.py')
# exec('console.py')
|
Calculator
|
sdl_cgo_static.go
|
// +build static
package sdl
|
//#cgo linux,amd64 LDFLAGS: -lSDL2_linux_amd64 -lm -ldl -lasound -lm -ldl -lpthread -lX11 -lXext -lXcursor -lXinerama -lXi -lXrandr -lXss -lXxf86vm -lpthread -lrt
//#cgo windows,386 LDFLAGS: -lSDL2_windows_386 -lSDL2main_windows_386 -lm -ldinput8 -ldxguid -ldxerr8 -luser32 -lgdi32 -lwinmm -limm32 -lole32 -loleaut32 -lshell32 -lsetupapi -lversion -luuid -static-libgcc
//#cgo windows,amd64 LDFLAGS: -lSDL2_windows_amd64 -lSDL2main_windows_amd64 -lm -ldinput8 -ldxguid -ldxerr8 -luser32 -lgdi32 -lwinmm -limm32 -lole32 -loleaut32 -lshell32 -lversion -luuid -lsetupapi -static-libgcc
//#cgo darwin,amd64 LDFLAGS: -lSDL2_darwin_amd64 -lm -liconv -Wl,-framework,OpenGL -Wl,-framework,CoreAudio -Wl,-framework,AudioToolbox -Wl,-framework,ForceFeedback -lobjc -Wl,-framework,CoreVideo -Wl,-framework,Cocoa -Wl,-framework,Carbon -Wl,-framework,IOKit -Wl,-framework,Metal
//#cgo darwin,arm64 LDFLAGS: -lSDL2_darwin_arm64 -lm -liconv -Wl,-framework,OpenGL -Wl,-framework,CoreAudio -Wl,-framework,AudioToolbox -Wl,-framework,ForceFeedback -lobjc -Wl,-framework,CoreVideo -Wl,-framework,Cocoa -Wl,-framework,Carbon -Wl,-framework,IOKit -Wl,-framework,Metal
//#cgo android,arm LDFLAGS: -lSDL2_android_arm -lm -ldl -llog -landroid -lGLESv2 -lGLESv1_CM
//#cgo linux,arm,!android LDFLAGS: -L/opt/vc/lib -L/opt/vc/lib64 -lSDL2_linux_arm -lm -ldl -liconv -lbcm_host -lvcos -lvchiq_arm -pthread
import "C"
|
//#cgo CFLAGS: -I${SRCDIR}/../.go-sdl2-libs/include -I${SRCDIR}/../.go-sdl2-libs/include/SDL2
//#cgo LDFLAGS: -L${SRCDIR}/../.go-sdl2-libs
//#cgo linux,386 LDFLAGS: -lSDL2_linux_386 -lm -ldl -lasound -lm -ldl -lpthread -lX11 -lXext -lXcursor -lXinerama -lXi -lXrandr -lXss -lXxf86vm -lpthread -lrt
|
controller.py
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manager for key bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import curses.ascii
import app.config
import app.curses_util
import app.log
#import app.window
class Controller:
"""A Controller is a keyboard mapping from keyboard/mouse events to editor
commands."""
def __init__(self, view, name):
if app.config.strict_debug:
assert issubclass(self.__class__, Controller)
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.textBuffer = None
self.name = name
def parentController(self):
view = self.view.parent
while view is not None:
if view.controller is not None:
return view.controller
view = view.parent
def changeToConfirmClose(self):
self.findAndChangeTo('confirmClose')
def changeToConfirmOverwrite(self):
self.findAndChangeTo('confirmOverwrite')
def changeToFileManagerWindow(self, *args):
self.findAndChangeTo('fileManagerWindow')
def changeToConfirmQuit(self):
self.findAndChangeTo('interactiveQuit')
def changeToHostWindow(self, *args):
host = self.getNamedWindow('inputWindow')
if app.config.strict_debug:
assert issubclass(self.view.__class__, app.window.Window), self.view
assert issubclass(host.__class__, app.window.Window), host
self.view.changeFocusTo(host)
def changeToInputWindow(self, *args):
self.findAndChangeTo('inputWindow')
def changeToFind(self):
self.findAndChangeTo('interactiveFind')
def changeToFindPrior(self):
curses.ungetch(self.savedCh)
self.findAndChangeTo('interactiveFind')
def changeToGoto(self):
self.findAndChangeTo('interactiveGoto')
def changeToPaletteWindow(self):
self.findAndChangeTo('paletteWindow')
def changeToPopup(self):
self.findAndChangeTo('popupWindow')
def changeToPrediction(self):
self.findAndChangeTo('predictionWindow')
#self.findAndChangeTo('interactivePrediction')
def changeToPrompt(self):
self.findAndChangeTo('interactivePrompt')
def changeToQuit(self):
self.findAndChangeTo('interactiveQuit')
def changeToSaveAs(self):
view = self.getNamedWindow('fileManagerWindow')
view.setMode('saveAs')
view.bringToFront()
view.changeFocusTo(view)
def createNewTextBuffer(self):
bufferManager = self.view.program.bufferManager
self.view.setTextBuffer(bufferManager.newTextBuffer())
def doCommand(self, ch, meta):
# Check the commandSet for the input with both its string and integer
# representation.
self.savedCh = ch
cmd = (self.commandSet.get(ch) or
self.commandSet.get(app.curses_util.cursesKeyName(ch)))
if cmd:
cmd()
else:
self.commandDefault(ch, meta)
self.textBuffer.compoundChangePush()
def getNamedWindow(self, windowName):
view = self.view
while view is not None:
if hasattr(view, windowName):
return getattr(view, windowName)
view = view.parent
app.log.fatal(windowName + ' not found')
return None
def currentInputWindow(self):
return self.getNamedWindow('inputWindow')
def findAndChangeTo(self, windowName):
window = self.getNamedWindow(windowName)
window.bringToFront()
self.view.changeFocusTo(window)
def changeTo(self, window):
window.bringToFront()
self.view.changeFocusTo(window)
def focus(self):
app.log.info('base controller focus()')
def confirmationPromptFinish(self, *args):
window = self.getNamedWindow('inputWindow')
window.userIntent = 'edit'
window.bringToFront()
self.view.changeFocusTo(window)
def __closeHostFile(self, host):
"""Close the current file and switch to another or create an empty
file."""
bufferManager = host.program.bufferManager
bufferManager.closeTextBuffer(host.textBuffer)
host.userIntent = 'edit'
tb = bufferManager.getUnsavedBuffer()
if not tb:
tb = bufferManager.nextBuffer()
if not tb:
tb = bufferManager.newTextBuffer()
host.setTextBuffer(tb)
def closeFile(self):
app.log.info()
host = self.getNamedWindow('inputWindow')
self.__closeHostFile(host)
self.confirmationPromptFinish()
def closeOrConfirmClose(self):
"""If the file is clean, close it. If it is dirty, prompt the user
about whether to lose unsaved changes."""
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
if not tb.isDirty():
self.__closeHostFile(host)
return
if host.userIntent == 'edit':
host.userIntent = 'close'
self.changeToConfirmClose()
def initiateClose(self):
"""Called from input window controller."""
self.view.userIntent = 'close'
tb = self.view.textBuffer
if not tb.isDirty():
self.__closeHostFile(self.view)
return
self.view.changeFocusTo(self.view.confirmClose)
def initiateQuit(self):
"""Called from input window controller."""
self.view.userIntent = 'quit'
tb = self.view.textBuffer
if tb.isDirty():
self.view.changeFocusTo(self.view.interactiveQuit)
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.getUnsavedBuffer()
if tb:
self.view.setTextBuffer(tb)
self.view.changeFocusTo(self.view.interactiveQuit)
return
bufferManager.debugLog()
self.view.quitNow()
def initiateSave(self):
"""Called from input window controller."""
self.view.userIntent = 'edit'
tb = self.view.textBuffer
if tb.fullPath:
if not tb.isSafeToWrite():
self.view.changeFocusTo(self.view.confirmOverwrite)
return
tb.fileWrite()
return
self.changeToSaveAs()
def overwriteHostFile(self):
"""Close the current file and switch to another or create an empty
file.
"""
host = self.getNamedWindow('inputWindow')
host.textBuffer.fileWrite()
if host.userIntent == 'quit':
self.quitOrSwitchToConfirmQuit()
return
if host.userIntent == 'close':
self.__closeHostFile(host)
self.changeToHostWindow()
def nextFocusableWindow(self):
window = self.view.parent.nextFocusableWindow(self.view)
if window is not None:
self.view.changeFocusTo(window)
return window is not None
def priorFocusableWindow(self):
window = self.view.parent.priorFocusableWindow(self.view)
if window is not None:
self.view.changeFocusTo(window)
return window is not None
def writeOrConfirmOverwrite(self):
"""Ask whether the file should be overwritten."""
app.log.debug()
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
if not tb.isSafeToWrite():
self.changeToConfirmOverwrite()
return
tb.fileWrite()
# TODO(dschuyler): Is there a deeper issue here that necessitates saving
# the message? Does this only need to wrap the changeToHostWindow()?
# Store the save message so it is not overwritten.
saveMessage = tb.message
if host.userIntent == 'quit':
self.quitOrSwitchToConfirmQuit()
return
if host.userIntent == 'close':
self.__closeHostFile(host)
self.changeToHostWindow()
tb.message = saveMessage # Restore the save message.
def quitOrSwitchToConfirmQuit(self):
app.log.debug(self, self.view)
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
host.userIntent = 'quit'
if tb.isDirty():
self.changeToConfirmQuit()
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.getUnsavedBuffer()
if tb:
host.setTextBuffer(tb)
self.changeToConfirmQuit()
return
bufferManager.debugLog()
host.quitNow()
def saveOrChangeToSaveAs(self):
app.log.debug()
host = self.getNamedWindow('inputWindow')
if app.config.strict_debug:
assert issubclass(self.__class__, Controller), self
assert issubclass(self.view.__class__, app.window.Window), self
assert issubclass(host.__class__, app.window.Window), self
assert self.view.textBuffer is self.textBuffer
assert self.view.textBuffer is not host.textBuffer
if host.textBuffer.fullPath:
self.writeOrConfirmOverwrite()
return
self.changeToSaveAs()
def onChange(self):
pass
def saveEventChangeToHostWindow(self, *args):
curses.ungetch(self.savedCh)
host = self.getNamedWindow('inputWindow')
host.bringToFront()
self.view.changeFocusTo(host)
def setTextBuffer(self, textBuffer):
if app.config.strict_debug:
assert issubclass(textBuffer.__class__,
app.text_buffer.TextBuffer), textBuffer
assert self.view.textBuffer is textBuffer
self.textBuffer = textBuffer
def unfocus(self):
pass
class MainController:
"""The different keyboard mappings are different controllers. This class
manages a collection of keyboard mappings and allows the user to switch
between them."""
def __init__(self, view):
if app.config.strict_debug:
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.controllers = {}
self.controller = None
def add(self, controller):
self.controllers[controller.name] = controller
self.controller = controller
def doCommand(self, ch, meta):
self.controller.doCommand(ch, meta)
def focus(self):
app.log.info('MainController.focus')
self.controller.focus()
if 0:
|
})
self.controller.commandSet = commandSet
def onChange(self):
self.controller.onChange()
def nextController(self):
app.log.info('nextController')
if 0:
if self.controller is self.controllers['cuaPlus']:
app.log.info('MainController.nextController cua')
self.controller = self.controllers['cua']
elif self.controller is self.controllers['cua']:
app.log.info('MainController.nextController emacs')
self.controller = self.controllers['emacs']
elif self.controller is self.controllers['emacs']:
app.log.info('MainController.nextController vi')
self.controller = self.controllers['vi']
else:
app.log.info('MainController.nextController cua')
self.controller = self.controllers['cua']
self.controller.setTextBuffer(self.textBuffer)
self.focus()
def setTextBuffer(self, textBuffer):
app.log.info('MainController.setTextBuffer', self.controller)
if app.config.strict_debug:
assert issubclass(textBuffer.__class__, app.text_buffer.TextBuffer)
self.textBuffer = textBuffer
self.controller.setTextBuffer(textBuffer)
def unfocus(self):
self.controller.unfocus()
|
self.commandDefault = self.controller.commandDefault
commandSet = self.controller.commandSet.copy()
commandSet.update({
app.curses_util.KEY_F2: self.nextController,
|
top.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os
import random
random.seed(int(os.getenv("SEED"), 16))
from prjxray import util
from prjxray.db import Database
def gen_sites():
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
if gridinfo.tile_type != 'CFG_CENTER_MID':
continue
sites = {}
for site_name, site_type in gridinfo.sites.items():
if site_type not in sites:
sites[site_type] = []
sites[site_type].append(site_name)
for site_type in sites:
sites[site_type].sort()
int_grid_x = loc.grid_x + 3
int_tile_type = 'INT_L'
int_tile_locs = []
for dy in range(-9, 12):
# Skip the VBREAK tile.
if dy != 6:
int_tile_locs.append((int_grid_x, loc.grid_y + dy), )
int_tiles = []
for int_tile_loc in int_tile_locs:
int_gridinfo = grid.gridinfo_at_loc(int_tile_loc)
assert int_gridinfo.tile_type == int_tile_type, (
int_gridinfo.tile_type, int_tile_type)
int_tiles.append(grid.tilename_at_loc(int_tile_loc))
yield tile_name, sites, int_tiles
def write_params(params):
pinstr = 'tile,val\n'
    for tile, val in sorted(params.items()):
        pinstr += '%s,%s\n' % (tile, val)
    with open('params.csv', 'w') as f:
        f.write(pinstr)
def run():
print('''
module top();
''')
sites = list(gen_sites())
    # Only one CFG_CENTER_MID expected.
assert len(sites) == 1
tile_name, sites, int_tiles = sites[0]
assert len(sites['ICAP']) == 2, len(sites['ICAP'])
# int_tiles[6]:
# IMUX43 -> ICAP1_I31 = 0
# IMUX42 -> ICAP1_I30 = toggle 0/1
# int_tiles[7]:
# IMUX43 -> ICAP1_I15 = 0
# IMUX42 -> ICAP1_I14 = toggle 0/1
# int_tiles[8]:
# IMUX43 -> ICAP1_CSIB = 0
# IMUX42 -> ICAP1_RDWRB = toggle 0/1
ICAP1_I30 = random.randint(0, 1)
ICAP1_I14 = random.randint(0, 1)
ICAP1_RDWRB = random.randint(0, 1)
params = {}
params[int_tiles[6]] = ICAP1_I30
params[int_tiles[7]] = ICAP1_I14
params[int_tiles[8]] = ICAP1_RDWRB
print(
"""
wire [31:0] icap_i_{site};
    wire icap_rdwrb_{site};
wire icap_csib_{site};
|
assign icap_i_{site}[31] = 0;
assign icap_i_{site}[30] = {ICAP1_I30};
assign icap_i_{site}[15] = 0;
assign icap_i_{site}[14] = {ICAP1_I14};
assign icap_csib_{site} = 0;
assign icap_rdwrb_{site} = {ICAP1_RDWRB};
(* KEEP, DONT_TOUCH, LOC = "{site}" *)
ICAPE2 icap_{site} (
.I(icap_i_{site}),
.RDWRB(icap_rdwrb_{site}),
.CSIB(icap_csib_{site})
);
""".format(
site=sites['ICAP'][1],
ICAP1_I30=ICAP1_I30,
ICAP1_I14=ICAP1_I14,
ICAP1_RDWRB=ICAP1_RDWRB))
print("endmodule")
write_params(params)
if __name__ == '__main__':
run()
| |
test_examples.py
|
import os
import os.path
import re
import shutil
import mlflow
from mlflow import cli
from mlflow.utils import process
from tests.integration.utils import invoke_cli_runner
import pytest
import json
import hashlib
EXAMPLES_DIR = "examples"
def hash_conda_env(conda_env_path):
# use the same hashing logic as `_get_conda_env_name` in mlflow/utils/conda.py
    with open(conda_env_path) as f:
        return hashlib.sha1(f.read().encode("utf-8")).hexdigest()
def get_conda_envs():
stdout = process.exec_cmd(["conda", "env", "list", "--json"])[1]
return [os.path.basename(env) for env in json.loads(stdout)["envs"]]
def is_mlflow_conda_env(env_name):
return re.search(r"^mlflow-\w{40}$", env_name) is not None
def remove_conda_env(env_name):
process.exec_cmd(["conda", "remove", "--name", env_name, "--yes", "--all"])
def get_free_disk_space():
# https://stackoverflow.com/a/48929832/6943581
return shutil.disk_usage("/")[-1] / (2 ** 30)
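# e.g. get_free_disk_space() might return 12.3, meaning roughly 12.3 GiB free.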
def is_conda_yaml(path):
return bool(re.search("conda.ya?ml$", path))
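# e.g. is_conda_yaml("conda.yaml") and is_conda_yaml("conda.yml") are True,
# while is_conda_yaml("requirements.txt") is False.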
def find_conda_yaml(directory):
conda_yaml = list(filter(is_conda_yaml, os.listdir(directory)))[0]
return os.path.join(directory, conda_yaml)
def replace_mlflow_with_dev_version(yml_path):
with open(yml_path, "r") as f:
old_src = f.read()
mlflow_dir = os.path.dirname(mlflow.__path__[0])
new_src = re.sub(r"- mlflow.*\n", "- {}\n".format(mlflow_dir), old_src)
with open(yml_path, "w") as f:
f.write(new_src)
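# Sketch of the substitution (paths hypothetical): a dependency line such as
#   "- mlflow>=1.0"
# becomes
#   "- /home/user/mlflow"
# so the example installs the local dev checkout instead of the PyPI release.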
@pytest.fixture(scope="function", autouse=True)
def clean_envs_and_cache():
yield
if get_free_disk_space() < 7.0: # unit: GiB
process.exec_cmd(["./dev/remove-conda-envs.sh"])
@pytest.fixture(scope="function", autouse=True)
def report_free_disk_space(capsys):
yield
with capsys.disabled():
print(" | Free disk space: {:.1f} GiB".format(get_free_disk_space()), end="")
@pytest.mark.large
@pytest.mark.parametrize(
"directory, params",
[
("h2o", []),
("hyperparam", ["-e", "train", "-P", "epochs=1"]),
("hyperparam", ["-e", "random", "-P", "epochs=1"]),
("hyperparam", ["-e", "gpyopt", "-P", "epochs=1"]),
("hyperparam", ["-e", "hyperopt", "-P", "epochs=1"]),
(
"lightgbm",
["-P", "learning_rate=0.1", "-P", "colsample_bytree=0.8", "-P", "subsample=0.9"],
),
("statsmodels", ["-P", "inverse_method=qr"]),
("pytorch", ["-P", "epochs=2"]),
("sklearn_logistic_regression", []),
("sklearn_elasticnet_wine", ["-P", "alpha=0.5"]),
(os.path.join("sklearn_elasticnet_diabetes", "linux"), []),
("spacy", []),
(os.path.join("tensorflow", "tf1"), ["-P", "steps=10"]),
(
"xgboost",
["-P", "learning_rate=0.3", "-P", "colsample_bytree=0.8", "-P", "subsample=0.9"],
),
("fastai", ["-P", "lr=0.02", "-P", "epochs=3"]),
(os.path.join("pytorch", "MNIST"), ["-P", "max_epochs=1"]),
(
os.path.join("pytorch", "BertNewsClassification"),
["-P", "max_epochs=1", "-P", "num_samples=100", "-P", "dataset=20newsgroups"],
),
(
os.path.join("pytorch", "AxHyperOptimizationPTL"),
["-P", "max_epochs=10", "-P", "total_trials=1"],
),
(
os.path.join("pytorch", "IterativePruning"),
["-P", "max_epochs=1", "-P", "total_trials=1"],
),
(os.path.join("pytorch", "CaptumExample"), ["-P", "max_epochs=50"]),
],
)
def test_mlflow_run_example(directory, params, tmpdir):
example_dir = os.path.join(EXAMPLES_DIR, directory)
tmp_example_dir = os.path.join(tmpdir.strpath, directory)
shutil.copytree(example_dir, tmp_example_dir)
conda_yml_path = find_conda_yaml(tmp_example_dir)
replace_mlflow_with_dev_version(conda_yml_path)
# remove old conda environments to free disk space
envs = list(filter(is_mlflow_conda_env, get_conda_envs()))
current_env_name = "mlflow-" + hash_conda_env(conda_yml_path)
envs_to_remove = list(filter(lambda e: e != current_env_name, envs))
for env in envs_to_remove:
remove_conda_env(env)
cli_run_list = [tmp_example_dir] + params
invoke_cli_runner(cli.run, cli_run_list)
@pytest.mark.large
@pytest.mark.parametrize(
"directory, command",
[
("docker", ["docker", "build", "-t", "mlflow-docker-example", "-f", "Dockerfile", "."]),
("gluon", ["python", "train.py"]),
("keras", ["python", "train.py"]),
(
"lightgbm",
[
"python",
"train.py",
"--learning-rate",
"0.2",
"--colsample-bytree",
"0.8",
"--subsample",
"0.9",
],
),
("statsmodels", ["python", "train.py", "--inverse-method", "qr"]),
("quickstart", ["python", "mlflow_tracking.py"]),
("remote_store", ["python", "remote_server.py"]),
(
"xgboost",
[
"python",
"train.py",
"--learning-rate",
"0.2",
"--colsample-bytree",
"0.8",
"--subsample",
"0.9",
],
        ),
        ("catboost", ["python", "train.py"]),
        ("prophet", ["python", "train.py"]),
("sklearn_autolog", ["python", "linear_regression.py"]),
("sklearn_autolog", ["python", "pipeline.py"]),
("sklearn_autolog", ["python", "grid_search_cv.py"]),
("pyspark_ml_autologging", ["python", "logistic_regression.py"]),
("pyspark_ml_autologging", ["python", "one_vs_rest.py"]),
("pyspark_ml_autologging", ["python", "pipeline.py"]),
("shap", ["python", "regression.py"]),
("shap", ["python", "binary_classification.py"]),
("shap", ["python", "multiclass_classification.py"]),
("shap", ["python", "explainer_logging.py"]),
("ray_serve", ["python", "train_model.py"]),
("pip_requirements", ["python", "pip_requirements.py"]),
],
)
def test_command_example(directory, command):
cwd_dir = os.path.join(EXAMPLES_DIR, directory)
process.exec_cmd(command, cwd=cwd_dir)
|
aws-ssm-association_target.go
|
package resources
import "github.com/awslabs/goformation/cloudformation/policies"
// AWSSSMAssociation_Target AWS CloudFormation Resource (AWS::SSM::Association.Target)
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-association-target.html
type AWSSSMAssociation_Target struct {
// Key AWS CloudFormation Property
// Required: true
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-association-target.html#cfn-ssm-association-target-key
Key string `json:"Key,omitempty"`
// Values AWS CloudFormation Property
// Required: true
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-association-target.html#cfn-ssm-association-target-values
Values []string `json:"Values,omitempty"`
// _deletionPolicy represents a CloudFormation DeletionPolicy
_deletionPolicy policies.DeletionPolicy
// _dependsOn stores the logical ID of the resources to be created before this resource
_dependsOn []string
// _metadata stores structured data associated with this resource
_metadata map[string]interface{}
}
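// Illustrative only: within a template, a Target typically serializes as
//   {"Key": "tag:Environment", "Values": ["production"]}
// where the key and values shown are hypothetical example values.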
// AWSCloudFormationType returns the AWS CloudFormation resource type
func (r *AWSSSMAssociation_Target) AWSCloudFormationType() string {
return "AWS::SSM::Association.Target"
}
// DependsOn returns a slice of logical ID names this resource depends on.
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html
func (r *AWSSSMAssociation_Target) DependsOn() []string {
return r._dependsOn
}
// SetDependsOn specifies that the creation of this resource follows another.
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html
func (r *AWSSSMAssociation_Target) SetDependsOn(dependencies []string) {
r._dependsOn = dependencies
}
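// Example (sketch, with a hypothetical logical ID):
//   target.SetDependsOn([]string{"MyManagedInstance"})
// This records that "MyManagedInstance" must be created before this resource.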
// Metadata returns the metadata associated with this resource.
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-metadata.html
func (r *AWSSSMAssociation_Target) Metadata() map[string]interface{} {
return r._metadata
}
// SetMetadata enables you to associate structured data with this resource.
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-metadata.html
func (r *AWSSSMAssociation_Target) SetMetadata(metadata map[string]interface{}) {
r._metadata = metadata
}
// DeletionPolicy returns the AWS CloudFormation DeletionPolicy of this resource
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-deletionpolicy.html
func (r *AWSSSMAssociation_Target) DeletionPolicy() policies.DeletionPolicy {
return r._deletionPolicy
}
// SetDeletionPolicy applies an AWS CloudFormation DeletionPolicy to this resource
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-deletionpolicy.html
func (r *AWSSSMAssociation_Target) SetDeletionPolicy(policy policies.DeletionPolicy) {
	r._deletionPolicy = policy
}
|
util.rs
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use kvproto::coprocessor as coppb;
use tipb::schema::ColumnInfo;
use crate::coprocessor::codec::datum::Datum;
use crate::coprocessor::*;
/// A `RequestHandler` that always produces errors.
pub struct ErrorRequestHandler {
error: Option<Error>,
}
impl ErrorRequestHandler {
pub fn new(error: Error) -> ErrorRequestHandler
    {
        ErrorRequestHandler { error: Some(error) }
    }
}
impl RequestHandler for ErrorRequestHandler {
fn handle_request(&mut self) -> Result<coppb::Response> {
Err(self.error.take().unwrap())
}
fn handle_streaming_request(&mut self) -> Result<(Option<coppb::Response>, bool)> {
Err(self.error.take().unwrap())
}
}
/// Convert the key to the smallest key which is larger than the key given.
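/// For example, the prefix next of `[1, 2, 255]` is `[1, 3, 0]`, and the
/// prefix next of an all-255 key such as `[255, 255]` is `[255, 255, 0]`.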
pub fn convert_to_prefix_next(key: &mut Vec<u8>) {
if key.is_empty() {
key.push(0);
return;
}
let mut i = key.len() - 1;
    // Add 1 to the last byte that is not 255, and set its following bytes to 0.
loop {
if key[i] == 255 {
key[i] = 0;
} else {
key[i] += 1;
return;
}
if i == 0 {
            // All bytes were 255 and have been zeroed above; restore them to 255
            // and append a 0.
for byte in key.iter_mut() {
*byte = 255;
}
key.push(0);
return;
}
i -= 1;
}
}
/// Check if `key`'s prefix next equals `next`.
pub fn is_prefix_next(key: &[u8], next: &[u8]) -> bool {
let len = key.len();
let next_len = next.len();
if len == next_len {
// Find the last non-255 byte
let mut carry_pos = len;
loop {
if carry_pos == 0 {
// All bytes of `key` are 255. `next` couldn't be `key`'s prefix_next since their
// lengths are equal.
return false;
}
carry_pos -= 1;
if key[carry_pos] != 255 {
break;
}
}
// Now `carry_pos` is the index of the last byte that is not 255. For example:
// key: [1, 2, 3, 255, 255, 255]
// ^ carry_pos == 2
        // `next` is `key`'s prefix_next exactly when:
        // * `next`'s byte at `carry_pos` is `key`'s byte at `carry_pos` plus 1,
        // * `next`'s part after `carry_pos` is all 0, and
        // * `key`'s and `next`'s parts before `carry_pos` are equal.
// For example:
// key: [1, 2, 3, 255, 255, 255]
// next: [1, 2, 4, 0, 0, 0]
// ^ carry_pos == 2
// The part before `carry_pos` is all [1, 2],
// the bytes at `carry_pos` differs by 1 (4 == 3 + 1), and
        // the remaining bytes of next ([0, 0, 0]) are all 0,
// so [1, 2, 4, 0, 0, 0] is prefix_next of [1, 2, 3, 255, 255, 255]
key[carry_pos] + 1 == next[carry_pos]
&& next[carry_pos + 1..].iter().all(|byte| *byte == 0)
&& key[..carry_pos] == next[..carry_pos]
} else if len + 1 == next_len {
        // `next` must be one byte longer than `key`, with all bytes of `key` and
        // the first `len` bytes of `next` equal to 255, and the last byte of
        // `next` equal to 0. The case that `len == 0` is also covered here.
*next.last().unwrap() == 0
&& key.iter().all(|byte| *byte == 255)
&& next.iter().take(len).all(|byte| *byte == 255)
} else {
// Length not match.
false
}
}
/// `is_point` checks if the key range represents a point.
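/// For example, a range with start `[1, 2]` and end `[1, 3]` represents the
/// single key `[1, 2]`, since `[1, 3]` is the prefix next of `[1, 2]`.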
#[inline]
pub fn is_point(range: &coppb::KeyRange) -> bool {
is_prefix_next(range.get_start(), range.get_end())
}
#[inline]
pub fn get_pk(col: &ColumnInfo, h: i64) -> Datum {
use cop_datatype::{FieldTypeAccessor, FieldTypeFlag};
if col.flag().contains(FieldTypeFlag::UNSIGNED) {
// PK column is unsigned
Datum::U64(h as u64)
} else {
Datum::I64(h)
}
}
#[cfg(test)]
mod tests {
use super::*;
fn test_prefix_next_once(key: &[u8], expected: &[u8]) {
let mut key = key.to_vec();
convert_to_prefix_next(&mut key);
assert_eq!(key.as_slice(), expected);
}
#[test]
fn test_prefix_next() {
test_prefix_next_once(&[], &[0]);
test_prefix_next_once(&[0], &[1]);
test_prefix_next_once(&[1], &[2]);
test_prefix_next_once(&[255], &[255, 0]);
test_prefix_next_once(&[255, 255, 255], &[255, 255, 255, 0]);
test_prefix_next_once(&[1, 255], &[2, 0]);
test_prefix_next_once(&[0, 1, 255], &[0, 2, 0]);
test_prefix_next_once(&[0, 1, 255, 5], &[0, 1, 255, 6]);
test_prefix_next_once(&[0, 1, 5, 255], &[0, 1, 6, 0]);
test_prefix_next_once(&[0, 1, 255, 255], &[0, 2, 0, 0]);
test_prefix_next_once(&[0, 255, 255, 255], &[1, 0, 0, 0]);
}
fn test_is_prefix_next_case(lhs: &[u8], expected: &[u8], unexpected: &[&[u8]]) {
assert!(is_prefix_next(lhs, expected));
for rhs in unexpected {
assert!(!is_prefix_next(lhs, rhs));
}
}
#[test]
fn test_is_prefix_next() {
test_is_prefix_next_case(&[], &[0], &[&[], &[1], &[2]]);
test_is_prefix_next_case(&[0], &[1], &[&[], &[0], &[0, 0], &[2], &[255]]);
test_is_prefix_next_case(&[1], &[2], &[&[], &[1], &[3], &[1, 0]]);
test_is_prefix_next_case(&[255], &[255, 0], &[&[0], &[255, 255, 0]]);
test_is_prefix_next_case(
&[255, 255, 255],
&[255, 255, 255, 0],
&[
&[],
&[0],
&[0, 0, 0],
&[255, 255, 0],
&[255, 255, 255, 255, 0],
],
);
test_is_prefix_next_case(
&[1, 255],
&[2, 0],
&[&[], &[1, 255, 0], &[2, 255], &[1, 255], &[2, 0, 0]],
);
test_is_prefix_next_case(
&[0, 255],
&[1, 0],
&[&[], &[0, 255, 0], &[1, 255], &[0, 255], &[1, 0, 0]],
);
test_is_prefix_next_case(
&[1, 2, 3, 4, 255, 255],
&[1, 2, 3, 5, 0, 0],
&[
&[],
&[1, 2, 3, 4, 255, 255],
&[1, 2, 3, 4, 0, 0],
&[1, 2, 3, 5, 255, 255],
&[1, 2, 3, 5, 0, 1],
&[1, 2, 3, 5, 1, 0],
&[1, 2, 4, 0, 0, 0],
],
);
}
}
|
state_test.go
|
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package state
import (
"bytes"
"errors"
"fmt"
"math/rand"
"net"
"sync"
"sync/atomic"
"testing"
"time"
pb "github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/common/configtx/test"
errors2 "github.com/hyperledger/fabric/common/errors"
"github.com/hyperledger/fabric/common/flogging/floggingtest"
"github.com/hyperledger/fabric/common/metrics/disabled"
"github.com/hyperledger/fabric/common/util"
corecomm "github.com/hyperledger/fabric/core/comm"
"github.com/hyperledger/fabric/core/committer"
"github.com/hyperledger/fabric/core/committer/txvalidator"
"github.com/hyperledger/fabric/core/ledger"
"github.com/hyperledger/fabric/core/mocks/validator"
"github.com/hyperledger/fabric/core/transientstore"
"github.com/hyperledger/fabric/gossip/api"
"github.com/hyperledger/fabric/gossip/comm"
"github.com/hyperledger/fabric/gossip/common"
"github.com/hyperledger/fabric/gossip/discovery"
"github.com/hyperledger/fabric/gossip/gossip"
"github.com/hyperledger/fabric/gossip/gossip/algo"
"github.com/hyperledger/fabric/gossip/gossip/channel"
"github.com/hyperledger/fabric/gossip/metrics"
"github.com/hyperledger/fabric/gossip/privdata"
capabilitymock "github.com/hyperledger/fabric/gossip/privdata/mocks"
"github.com/hyperledger/fabric/gossip/state/mocks"
gossiputil "github.com/hyperledger/fabric/gossip/util"
gutil "github.com/hyperledger/fabric/gossip/util"
pcomm "github.com/hyperledger/fabric/protos/common"
proto "github.com/hyperledger/fabric/protos/gossip"
"github.com/hyperledger/fabric/protos/ledger/rwset"
transientstore2 "github.com/hyperledger/fabric/protos/transientstore"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
var (
orgID = []byte("ORG1")
noopPeerIdentityAcceptor = func(identity api.PeerIdentityType) error {
return nil
}
config = &Configuration{
AntiEntropyInterval: DefAntiEntropyInterval,
AntiEntropyStateResponseTimeout: DefAntiEntropyStateResponseTimeout,
AntiEntropyBatchSize: DefAntiEntropyBatchSize,
MaxBlockDistance: DefMaxBlockDistance,
AntiEntropyMaxRetries: DefAntiEntropyMaxRetries,
ChannelBufferSize: DefChannelBufferSize,
EnableStateTransfer: true,
BlockingMode: Blocking,
}
)
type peerIdentityAcceptor func(identity api.PeerIdentityType) error
type joinChanMsg struct {
}
func init() {
gutil.SetupTestLogging()
factory.InitFactories(nil)
}
// SequenceNumber returns the sequence number of the block that the message
// is derived from
func (*joinChanMsg) SequenceNumber() uint64 {
return uint64(time.Now().UnixNano())
}
// Members returns the organizations of the channel
func (jcm *joinChanMsg) Members() []api.OrgIdentityType {
return []api.OrgIdentityType{orgID}
}
// AnchorPeersOf returns the anchor peers of the given organization
func (jcm *joinChanMsg) AnchorPeersOf(org api.OrgIdentityType) []api.AnchorPeer {
return []api.AnchorPeer{}
}
type orgCryptoService struct {
}
// OrgByPeerIdentity returns the OrgIdentityType
// of a given peer identity
func (*orgCryptoService) OrgByPeerIdentity(identity api.PeerIdentityType) api.OrgIdentityType {
return orgID
}
// Verify verifies a JoinChannelMessage, returns nil on success,
// and an error on failure
func (*orgCryptoService) Verify(joinChanMsg api.JoinChannelMessage) error {
return nil
}
type cryptoServiceMock struct {
acceptor peerIdentityAcceptor
}
func (cryptoServiceMock) Expiration(peerIdentity api.PeerIdentityType) (time.Time, error) {
return time.Now().Add(time.Hour), nil
}
// GetPKIidOfCert returns the PKI-ID of a peer's identity
func (*cryptoServiceMock) GetPKIidOfCert(peerIdentity api.PeerIdentityType) common.PKIidType {
return common.PKIidType(peerIdentity)
}
// VerifyBlock returns nil if the block is properly signed,
// else returns error
func (*cryptoServiceMock) VerifyBlock(chainID common.ChainID, seqNum uint64, signedBlock []byte) error {
return nil
}
// Sign signs msg with this peer's signing key and outputs
// the signature if no error occurred.
func (*cryptoServiceMock) Sign(msg []byte) ([]byte, error) {
clone := make([]byte, len(msg))
copy(clone, msg)
return clone, nil
}
// Verify checks that signature is a valid signature of message under a peer's verification key.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerCert is nil, then the signature is verified against this peer's verification key.
func (*cryptoServiceMock) Verify(peerIdentity api.PeerIdentityType, signature, message []byte) error {
equal := bytes.Equal(signature, message)
if !equal {
return fmt.Errorf("Wrong signature:%v, %v", signature, message)
}
return nil
}
// VerifyByChannel checks that signature is a valid signature of message
// under a peer's verification key, but also in the context of a specific channel.
// If the verification succeeded, Verify returns nil meaning no error occurred.
// If peerIdentity is nil, then the signature is verified against this peer's verification key.
func (cs *cryptoServiceMock) VerifyByChannel(chainID common.ChainID, peerIdentity api.PeerIdentityType, signature, message []byte) error {
return cs.acceptor(peerIdentity)
}
func (*cryptoServiceMock) ValidateIdentity(peerIdentity api.PeerIdentityType) error {
return nil
}
func bootPeersWithPorts(ports ...int) []string {
	// Body inferred from usage: bootstrap peers are addressed as "127.0.0.1:<port>",
	// matching the InternalEndpoint format used below.
	var peers []string
	for _, port := range ports {
		peers = append(peers, fmt.Sprintf("127.0.0.1:%d", port))
	}
	return peers
}
// Simple presentation of peer which includes only
// communication module, gossip and state transfer
type peerNode struct {
port int
g gossip.Gossip
s *GossipStateProviderImpl
cs *cryptoServiceMock
commit committer.Committer
grpc *corecomm.GRPCServer
}
// Shutting down all modules used
func (node *peerNode) shutdown() {
node.s.Stop()
node.g.Stop()
node.grpc.Stop()
}
type mockTransientStore struct {
}
func (*mockTransientStore) PurgeByHeight(maxBlockNumToRetain uint64) error {
return nil
}
func (*mockTransientStore) Persist(txid string, blockHeight uint64, privateSimulationResults *rwset.TxPvtReadWriteSet) error {
panic("implement me")
}
func (*mockTransientStore) PersistWithConfig(txid string, blockHeight uint64, privateSimulationResultsWithConfig *transientstore2.TxPvtReadWriteSetWithConfigInfo) error {
panic("implement me")
}
func (mockTransientStore) GetTxPvtRWSetByTxid(txid string, filter ledger.PvtNsCollFilter) (transientstore.RWSetScanner, error) {
panic("implement me")
}
func (*mockTransientStore) PurgeByTxids(txids []string) error {
panic("implement me")
}
type mockCommitter struct {
*mock.Mock
sync.Mutex
}
func (mc *mockCommitter) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
args := mc.Called()
return args.Get(0).(ledger.ConfigHistoryRetriever), args.Error(1)
}
func (mc *mockCommitter) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
args := mc.Called(blockNum, filter)
return args.Get(0).([]*ledger.TxPvtData), args.Error(1)
}
func (mc *mockCommitter) CommitWithPvtData(blockAndPvtData *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
mc.Lock()
m := mc.Mock
mc.Unlock()
m.Called(blockAndPvtData.Block)
return nil
}
func (mc *mockCommitter) GetPvtDataAndBlockByNum(seqNum uint64) (*ledger.BlockAndPvtData, error) {
args := mc.Called(seqNum)
return args.Get(0).(*ledger.BlockAndPvtData), args.Error(1)
}
func (mc *mockCommitter) LedgerHeight() (uint64, error) {
mc.Lock()
m := mc.Mock
mc.Unlock()
args := m.Called()
if args.Get(1) == nil {
return args.Get(0).(uint64), nil
}
return args.Get(0).(uint64), args.Get(1).(error)
}
func (mc *mockCommitter) DoesPvtDataInfoExistInLedger(blkNum uint64) (bool, error) {
args := mc.Called(blkNum)
return args.Get(0).(bool), args.Error(1)
}
func (mc *mockCommitter) GetBlocks(blockSeqs []uint64) []*pcomm.Block {
if mc.Called(blockSeqs).Get(0) == nil {
return nil
}
return mc.Called(blockSeqs).Get(0).([]*pcomm.Block)
}
func (*mockCommitter) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
panic("implement me")
}
func (*mockCommitter) CommitPvtDataOfOldBlocks(blockPvtData []*ledger.BlockPvtData) ([]*ledger.PvtdataHashMismatch, error) {
panic("implement me")
}
func (*mockCommitter) Close() {
}
type ramLedger struct {
ledger map[uint64]*ledger.BlockAndPvtData
sync.RWMutex
}
func (mock *ramLedger) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
panic("implement me")
}
func (mock *ramLedger) CommitPvtDataOfOldBlocks(blockPvtData []*ledger.BlockPvtData) ([]*ledger.PvtdataHashMismatch, error) {
panic("implement me")
}
func (mock *ramLedger) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
panic("implement me")
}
func (mock *ramLedger) GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error) {
mock.RLock()
defer mock.RUnlock()
if block, ok := mock.ledger[blockNum]; !ok {
		return nil, fmt.Errorf("no block with seq = %d found", blockNum)
} else {
return block, nil
}
}
func (mock *ramLedger) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
panic("implement me")
}
func (mock *ramLedger) CommitWithPvtData(blockAndPvtdata *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
mock.Lock()
defer mock.Unlock()
if blockAndPvtdata != nil && blockAndPvtdata.Block != nil {
mock.ledger[blockAndPvtdata.Block.Header.Number] = blockAndPvtdata
return nil
}
return errors.New("invalid input parameters for block and private data param")
}
func (mock *ramLedger) GetBlockchainInfo() (*pcomm.BlockchainInfo, error) {
mock.RLock()
defer mock.RUnlock()
currentBlock := mock.ledger[uint64(len(mock.ledger)-1)].Block
return &pcomm.BlockchainInfo{
Height: currentBlock.Header.Number + 1,
CurrentBlockHash: currentBlock.Header.Hash(),
PreviousBlockHash: currentBlock.Header.PreviousHash,
}, nil
}
func (mock *ramLedger) DoesPvtDataInfoExist(blkNum uint64) (bool, error) {
return false, nil
}
func (mock *ramLedger) GetBlockByNumber(blockNumber uint64) (*pcomm.Block, error) {
mock.RLock()
defer mock.RUnlock()
if blockAndPvtData, ok := mock.ledger[blockNumber]; !ok {
		return nil, fmt.Errorf("no block with seq = %d found", blockNumber)
} else {
return blockAndPvtData.Block, nil
}
}
func (mock *ramLedger) Close() {
}
// newCommitter creates a committer backed by an in-memory ledger, to be used for testing
func newCommitter() committer.Committer {
cb, _ := test.MakeGenesisBlock("testChain")
ldgr := &ramLedger{
ledger: make(map[uint64]*ledger.BlockAndPvtData),
}
ldgr.CommitWithPvtData(&ledger.BlockAndPvtData{Block: cb}, &ledger.CommitOptions{})
return committer.NewLedgerCommitter(ldgr)
}
func newPeerNodeWithGossip(id int, committer committer.Committer,
acceptor peerIdentityAcceptor, g gossip.Gossip, bootPorts ...int) *peerNode {
return newPeerNodeWithGossipWithValidator(id, committer, acceptor, g, &validator.MockValidator{}, bootPorts...)
}
// Construct a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNodeWithGossipWithValidatorWithMetrics(id int, committer committer.Committer,
acceptor peerIdentityAcceptor, g gossip.Gossip, v txvalidator.Validator,
gossipMetrics *metrics.GossipMetrics, bootPorts ...int) (node *peerNode, port int) {
cs := &cryptoServiceMock{acceptor: acceptor}
port, gRPCServer, certs, secureDialOpts, _ := gossiputil.CreateGRPCLayer()
if g == nil {
config := &gossip.Config{
BindPort: port,
BootstrapPeers: bootPeersWithPorts(bootPorts...),
ID: fmt.Sprintf("p%d", id),
MaxBlockCountToStore: 0,
MaxPropagationBurstLatency: time.Duration(10) * time.Millisecond,
MaxPropagationBurstSize: 10,
PropagateIterations: 1,
PropagatePeerNum: 3,
PullInterval: time.Duration(4) * time.Second,
PullPeerNum: 5,
InternalEndpoint: fmt.Sprintf("127.0.0.1:%d", port),
PublishCertPeriod: 10 * time.Second,
RequestStateInfoInterval: 4 * time.Second,
PublishStateInfoInterval: 4 * time.Second,
TimeForMembershipTracker: 5 * time.Second,
TLSCerts: certs,
DigestWaitTime: algo.DefDigestWaitTime,
RequestWaitTime: algo.DefRequestWaitTime,
ResponseWaitTime: algo.DefResponseWaitTime,
DialTimeout: comm.DefDialTimeout,
ConnTimeout: comm.DefConnTimeout,
RecvBuffSize: comm.DefRecvBuffSize,
SendBuffSize: comm.DefSendBuffSize,
MsgExpirationTimeout: channel.DefMsgExpirationTimeout,
AliveTimeInterval: discovery.DefAliveTimeInterval,
AliveExpirationTimeout: discovery.DefAliveExpirationTimeout,
AliveExpirationCheckInterval: discovery.DefAliveExpirationCheckInterval,
ReconnectInterval: discovery.DefReconnectInterval,
}
selfID := api.PeerIdentityType(config.InternalEndpoint)
mcs := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}
g = gossip.NewGossipService(config, gRPCServer.Server(), &orgCryptoService{}, mcs, selfID, secureDialOpts, gossipMetrics)
}
g.JoinChan(&joinChanMsg{}, common.ChainID(util.GetTestChainID()))
go func() {
gRPCServer.Start()
}()
// Initialize pseudo peer simulator, which has only three
// basic parts
	servicesAdapter := &ServicesMediator{GossipAdapter: g, MCSAdapter: cs}
coordConfig := privdata.CoordinatorConfig{
PullRetryThreshold: 0,
TransientBlockRetention: privdata.TransientBlockRetentionDefault,
SkipPullingInvalidTransactions: false,
}
mspID := "Org1MSP"
capabilityProvider := &capabilitymock.CapabilityProvider{}
appCapability := &capabilitymock.AppCapabilities{}
capabilityProvider.On("Capabilities").Return(appCapability)
appCapability.On("StorePvtDataOfInvalidTx").Return(true)
coord := privdata.NewCoordinator(mspID, privdata.Support{
Validator: v,
TransientStore: &mockTransientStore{},
Committer: committer,
CapabilityProvider: capabilityProvider,
}, pcomm.SignedData{}, gossipMetrics.PrivdataMetrics, coordConfig)
	sp := NewGossipStateProvider(util.GetTestChainID(), servicesAdapter, coord, gossipMetrics.StateMetrics, config)
if sp == nil {
gRPCServer.Stop()
return nil, port
}
return &peerNode{
port: port,
g: g,
s: sp.(*GossipStateProviderImpl),
commit: committer,
cs: cs,
grpc: gRPCServer,
}, port
}
// add metrics provider for metrics testing
func newPeerNodeWithGossipWithMetrics(id int, committer committer.Committer,
acceptor peerIdentityAcceptor, g gossip.Gossip, gossipMetrics *metrics.GossipMetrics) *peerNode {
node, _ := newPeerNodeWithGossipWithValidatorWithMetrics(id, committer, acceptor, g,
&validator.MockValidator{}, gossipMetrics)
return node
}
// Construct a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNodeWithGossipWithValidator(id int, committer committer.Committer,
acceptor peerIdentityAcceptor, g gossip.Gossip, v txvalidator.Validator, bootPorts ...int) *peerNode {
gossipMetrics := metrics.NewGossipMetrics(&disabled.Provider{})
node, _ := newPeerNodeWithGossipWithValidatorWithMetrics(id, committer, acceptor, g, v, gossipMetrics, bootPorts...)
return node
}
// Construct a pseudo peer node, simulating only the gossip and state transfer parts
func newPeerNode(id int, committer committer.Committer, acceptor peerIdentityAcceptor, bootPorts ...int) *peerNode {
return newPeerNodeWithGossip(id, committer, acceptor, nil, bootPorts...)
}
// Construct a pseudo boot node, simulating only the gossip and state transfer parts; returns the port
func newBootNode(id int, committer committer.Committer, acceptor peerIdentityAcceptor) (node *peerNode, port int) {
v := &validator.MockValidator{}
gossipMetrics := metrics.NewGossipMetrics(&disabled.Provider{})
return newPeerNodeWithGossipWithValidatorWithMetrics(id, committer, acceptor, nil, v, gossipMetrics)
}
func TestNilDirectMsg(t *testing.T) {
t.Parallel()
mc := &mockCommitter{Mock: &mock.Mock{}}
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
g := &mocks.GossipMock{}
g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage))
p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
defer p.shutdown()
p.s.handleStateRequest(nil)
p.s.directMessage(nil)
sMsg, _ := p.s.stateRequestMessage(uint64(10), uint64(8)).NoopSign()
req := &comm.ReceivedMessageImpl{
SignedGossipMessage: sMsg,
}
p.s.directMessage(req)
}
func TestNilAddPayload(t *testing.T) {
t.Parallel()
mc := &mockCommitter{Mock: &mock.Mock{}}
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
g := &mocks.GossipMock{}
g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage))
p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
defer p.shutdown()
err := p.s.AddPayload(nil)
assert.Error(t, err)
assert.Contains(t, err.Error(), "nil")
}
func TestAddPayloadLedgerUnavailable(t *testing.T) {
t.Parallel()
mc := &mockCommitter{Mock: &mock.Mock{}}
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
g := &mocks.GossipMock{}
g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage))
p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
defer p.shutdown()
// Simulate a problem in the ledger
failedLedger := mock.Mock{}
failedLedger.On("LedgerHeight", mock.Anything).Return(uint64(0), errors.New("cannot query ledger"))
mc.Lock()
mc.Mock = &failedLedger
mc.Unlock()
rawblock := pcomm.NewBlock(uint64(1), []byte{})
b, _ := pb.Marshal(rawblock)
err := p.s.AddPayload(&proto.Payload{
SeqNum: uint64(1),
Data: b,
})
assert.Error(t, err)
assert.Contains(t, err.Error(), "Failed obtaining ledger height")
assert.Contains(t, err.Error(), "cannot query ledger")
}
func TestLargeBlockGap(t *testing.T) {
// Scenario: the peer knows of a peer who has a ledger height much higher
// than itself (500 blocks higher).
	// The peer needs to ask for blocks in a way such that the size of the payload buffer
// never rises above a certain threshold.
t.Parallel()
mc := &mockCommitter{Mock: &mock.Mock{}}
blocksPassedToLedger := make(chan uint64, 200)
mc.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) {
blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
})
msgsFromPeer := make(chan proto.ReceivedMessage)
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
g := &mocks.GossipMock{}
membership := []discovery.NetworkMember{
{
PKIid: common.PKIidType("a"),
Endpoint: "a",
Properties: &proto.Properties{
LedgerHeight: 500,
},
}}
g.On("PeersOfChannel", mock.Anything).Return(membership)
g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
g.On("Accept", mock.Anything, true).Return(nil, msgsFromPeer)
g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
msg := arguments.Get(0).(*proto.GossipMessage)
// The peer requested a state request
req := msg.GetStateRequest()
// Construct a skeleton for the response
res := &proto.GossipMessage{
Nonce: msg.Nonce,
Channel: []byte(util.GetTestChainID()),
Content: &proto.GossipMessage_StateResponse{
StateResponse: &proto.RemoteStateResponse{},
},
}
// Populate the response with payloads according to what the peer asked
for seq := req.StartSeqNum; seq <= req.EndSeqNum; seq++ {
rawblock := pcomm.NewBlock(seq, []byte{})
b, _ := pb.Marshal(rawblock)
payload := &proto.Payload{
SeqNum: seq,
Data: b,
}
res.GetStateResponse().Payloads = append(res.GetStateResponse().Payloads, payload)
}
// Finally, send the response down the channel the peer expects to receive it from
sMsg, _ := res.NoopSign()
msgsFromPeer <- &comm.ReceivedMessageImpl{
SignedGossipMessage: sMsg,
}
})
p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
defer p.shutdown()
	// Process blocks at a speed of 20 milliseconds per block.
	// The imaginary peer that responds to state requests supplies the blocks.
	// If the payload buffer expands above defMaxBlockDistance*2 + defAntiEntropyBatchSize blocks, fail the test
blockProcessingTime := 20 * time.Millisecond // 10 seconds for total 500 blocks
expectedSequence := 1
for expectedSequence < 500 {
blockSeq := <-blocksPassedToLedger
assert.Equal(t, expectedSequence, int(blockSeq))
// Ensure payload buffer isn't over-populated
assert.True(t, p.s.payloads.Size() <= DefMaxBlockDistance*2+DefAntiEntropyBatchSize, "payload buffer size is %d", p.s.payloads.Size())
expectedSequence++
time.Sleep(blockProcessingTime)
}
}
func TestOverPopulation(t *testing.T) {
	// Scenario: Add blocks to the state provider with a gap in between, and ensure
	// that the payload buffer rejects blocks once the distance between the ledger
	// height and the latest block it contains is bigger than defMaxBlockDistance.
t.Parallel()
mc := &mockCommitter{Mock: &mock.Mock{}}
blocksPassedToLedger := make(chan uint64, 10)
mc.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) {
blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
})
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
g := &mocks.GossipMock{}
g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage))
p := newPeerNode(0, mc, noopPeerIdentityAcceptor)
defer p.shutdown()
// Add some blocks in a sequential manner and make sure it works
for i := 1; i <= 4; i++ {
rawblock := pcomm.NewBlock(uint64(i), []byte{})
b, _ := pb.Marshal(rawblock)
assert.NoError(t, p.s.addPayload(&proto.Payload{
SeqNum: uint64(i),
Data: b,
}, NonBlocking))
}
// Add payloads from 10 to defMaxBlockDistance, while we're missing blocks [5,9]
// Should succeed
for i := 10; i <= DefMaxBlockDistance; i++ {
rawblock := pcomm.NewBlock(uint64(i), []byte{})
b, _ := pb.Marshal(rawblock)
assert.NoError(t, p.s.addPayload(&proto.Payload{
SeqNum: uint64(i),
Data: b,
}, NonBlocking))
}
	// Add payloads from defMaxBlockDistance + 1 to defMaxBlockDistance * 10
// Should fail.
for i := DefMaxBlockDistance + 1; i <= DefMaxBlockDistance*10; i++ {
rawblock := pcomm.NewBlock(uint64(i), []byte{})
b, _ := pb.Marshal(rawblock)
assert.Error(t, p.s.addPayload(&proto.Payload{
SeqNum: uint64(i),
Data: b,
}, NonBlocking))
}
// Ensure only blocks 1-4 were passed to the ledger
close(blocksPassedToLedger)
i := 1
for seq := range blocksPassedToLedger {
assert.Equal(t, uint64(i), seq)
i++
}
assert.Equal(t, 5, i)
// Ensure we don't store too many blocks in memory
sp := p.s
assert.True(t, sp.payloads.Size() < DefMaxBlockDistance)
}
func TestBlockingEnqueue(t *testing.T) {
// Scenario: In parallel, get blocks from gossip and from the orderer.
	// We get twice as many blocks from the orderer as from gossip.
	// The blocks we get from gossip have random sequence numbers, to maximize disruption.
t.Parallel()
mc := &mockCommitter{Mock: &mock.Mock{}}
blocksPassedToLedger := make(chan uint64, 10)
mc.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) {
blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
})
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
g := &mocks.GossipMock{}
g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage))
p := newPeerNode(0, mc, noopPeerIdentityAcceptor)
defer p.shutdown()
numBlocksReceived := 500
receivedBlockCount := 0
// Get a block from the orderer every 1ms
go func() {
for i := 1; i <= numBlocksReceived; i++ {
rawblock := pcomm.NewBlock(uint64(i), []byte{})
b, _ := pb.Marshal(rawblock)
block := &proto.Payload{
SeqNum: uint64(i),
Data: b,
}
p.s.AddPayload(block)
time.Sleep(time.Millisecond)
}
}()
// Get a block from gossip every 1ms too
go func() {
rand.Seed(time.Now().UnixNano())
for i := 1; i <= numBlocksReceived/2; i++ {
blockSeq := rand.Intn(numBlocksReceived)
rawblock := pcomm.NewBlock(uint64(blockSeq), []byte{})
b, _ := pb.Marshal(rawblock)
block := &proto.Payload{
SeqNum: uint64(blockSeq),
Data: b,
}
p.s.addPayload(block, NonBlocking)
time.Sleep(time.Millisecond)
}
}()
for {
receivedBlock := <-blocksPassedToLedger
receivedBlockCount++
m := &mock.Mock{}
m.On("LedgerHeight", mock.Anything).Return(receivedBlock, nil)
m.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
m.On("CommitWithPvtData", mock.Anything).Run(func(arg mock.Arguments) {
blocksPassedToLedger <- arg.Get(0).(*pcomm.Block).Header.Number
})
mc.Lock()
mc.Mock = m
mc.Unlock()
assert.Equal(t, receivedBlock, uint64(receivedBlockCount))
if int(receivedBlockCount) == numBlocksReceived {
break
}
time.Sleep(time.Millisecond * 10)
}
}
func TestHaltChainProcessing(t *testing.T) {
gossipChannel := func(c chan *proto.GossipMessage) <-chan *proto.GossipMessage {
return c
}
makeBlock := func(seq int) []byte {
b := &pcomm.Block{
Header: &pcomm.BlockHeader{
Number: uint64(seq),
},
Data: &pcomm.BlockData{
Data: [][]byte{},
},
Metadata: &pcomm.BlockMetadata{
Metadata: [][]byte{
{}, {}, {}, {},
},
},
}
data, _ := pb.Marshal(b)
return data
}
newBlockMsg := func(i int) *proto.GossipMessage {
return &proto.GossipMessage{
Channel: []byte("testchainid"),
Content: &proto.GossipMessage_DataMsg{
DataMsg: &proto.DataMessage{
Payload: &proto.Payload{
SeqNum: uint64(i),
Data: makeBlock(i),
},
},
},
}
}
l, recorder := floggingtest.NewTestLogger(t)
logger = l
mc := &mockCommitter{Mock: &mock.Mock{}}
mc.On("CommitWithPvtData", mock.Anything)
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
g := &mocks.GossipMock{}
gossipMsgs := make(chan *proto.GossipMessage)
g.On("Accept", mock.Anything, false).Return(gossipChannel(gossipMsgs), nil)
g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage))
g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
v := &validator.MockValidator{}
v.On("Validate").Return(&errors2.VSCCExecutionFailureError{
Err: errors.New("foobar"),
}).Once()
peerNode := newPeerNodeWithGossipWithValidator(0, mc, noopPeerIdentityAcceptor, g, v)
defer peerNode.shutdown()
gossipMsgs <- newBlockMsg(1)
assertLogged(t, recorder, "Got error while committing")
assertLogged(t, recorder, "Aborting chain processing")
assertLogged(t, recorder, "foobar")
}
func TestFailures(t *testing.T) {
t.Parallel()
mc := &mockCommitter{Mock: &mock.Mock{}}
mc.On("LedgerHeight", mock.Anything).Return(uint64(0), nil)
g := &mocks.GossipMock{}
g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage))
g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
assert.Panics(t, func() {
newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
})
// Reprogram mock
mc.Mock = &mock.Mock{}
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), errors.New("Failed accessing ledger"))
assert.Nil(t, newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g))
}
func TestGossipReception(t *testing.T) {
t.Parallel()
signalChan := make(chan struct{})
rawblock := &pcomm.Block{
Header: &pcomm.BlockHeader{
Number: uint64(1),
},
Data: &pcomm.BlockData{
Data: [][]byte{},
},
Metadata: &pcomm.BlockMetadata{
Metadata: [][]byte{
{}, {}, {}, {},
},
},
}
b, _ := pb.Marshal(rawblock)
newMsg := func(channel string) *proto.GossipMessage {
{
return &proto.GossipMessage{
Channel: []byte(channel),
Content: &proto.GossipMessage_DataMsg{
DataMsg: &proto.DataMessage{
Payload: &proto.Payload{
SeqNum: 1,
Data: b,
},
},
},
}
}
}
createChan := func(signalChan chan struct{}) <-chan *proto.GossipMessage {
c := make(chan *proto.GossipMessage)
go func(c chan *proto.GossipMessage) {
// Wait for Accept() to be called
<-signalChan
// Simulate a message reception from the gossip component with an invalid channel
c <- newMsg("AAA")
// Simulate a message reception from the gossip component
c <- newMsg(util.GetTestChainID())
}(c)
return c
}
g := &mocks.GossipMock{}
rmc := createChan(signalChan)
g.On("Accept", mock.Anything, false).Return(rmc, nil).Run(func(_ mock.Arguments) {
signalChan <- struct{}{}
})
g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage))
g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
mc := &mockCommitter{Mock: &mock.Mock{}}
receivedChan := make(chan struct{})
mc.On("CommitWithPvtData", mock.Anything).Run(func(arguments mock.Arguments) {
block := arguments.Get(0).(*pcomm.Block)
assert.Equal(t, uint64(1), block.Header.Number)
receivedChan <- struct{}{}
})
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
mc.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
defer p.shutdown()
select {
case <-receivedChan:
case <-time.After(time.Second * 15):
assert.Fail(t, "Didn't commit a block within a timely manner")
}
}
func TestLedgerHeightFromProperties(t *testing.T) {
// Scenario: For each test, spawn a peer and supply it
	// with a specific mock of PeersOfChannel returning peers that
	// either set the ledger height in their properties or do not.
	// Ensure the logic handles both cases as needed
t.Parallel()
// Returns whether the given networkMember was selected or not
wasNetworkMemberSelected := func(t *testing.T, networkMember discovery.NetworkMember, wg *sync.WaitGroup) bool {
var wasGivenNetworkMemberSelected int32
finChan := make(chan struct{})
g := &mocks.GossipMock{}
g.On("Send", mock.Anything, mock.Anything).Run(func(arguments mock.Arguments) {
defer wg.Done()
msg := arguments.Get(0).(*proto.GossipMessage)
assert.NotNil(t, msg.GetStateRequest())
peer := arguments.Get(1).([]*comm.RemotePeer)[0]
if bytes.Equal(networkMember.PKIid, peer.PKIID) {
atomic.StoreInt32(&wasGivenNetworkMemberSelected, 1)
}
finChan <- struct{}{}
})
g.On("Accept", mock.Anything, false).Return(make(<-chan *proto.GossipMessage), nil)
g.On("Accept", mock.Anything, true).Return(nil, make(chan proto.ReceivedMessage))
defaultPeer := discovery.NetworkMember{
InternalEndpoint: "b",
PKIid: common.PKIidType("b"),
Properties: &proto.Properties{
LedgerHeight: 5,
},
}
g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{
defaultPeer,
networkMember,
})
mc := &mockCommitter{Mock: &mock.Mock{}}
mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil)
p := newPeerNodeWithGossip(0, mc, noopPeerIdentityAcceptor, g)
defer p.shutdown()
select {
case <-time.After(time.Second * 20):
t.Fatal("Didn't send a request within a timely manner")
case <-finChan:
}
return atomic.LoadInt32(&wasGivenNetworkMemberSelected) == 1
}
peerWithProperties := discovery.NetworkMember{
PKIid: common.PKIidType("peerWithoutMetadata"),
Properties: &proto.Properties{
LedgerHeight: 10,
},
InternalEndpoint: "peerWithoutMetadata",
}
peerWithoutProperties := discovery.NetworkMember{
PKIid: common.PKIidType("peerWithoutProperties"),
InternalEndpoint: "peerWithoutProperties",
}
tests := []struct {
shouldGivenBeSelected bool
member discovery.NetworkMember
}{
{member: peerWithProperties, shouldGivenBeSelected: true},
{member: peerWithoutProperties, shouldGivenBeSelected: false},
}
var wg sync.WaitGroup
wg.Add(len(tests))
for _, tst := range tests {
go func(shouldGivenBeSelected bool, member discovery.NetworkMember) {
assert.Equal(t, shouldGivenBeSelected, wasNetworkMemberSelected(t, member, &wg))
}(tst.shouldGivenBeSelected, tst.member)
}
wg.Wait()
}
func TestAccessControl(t *testing.T) {
t.Parallel()
bootstrapSetSize := 5
bootstrapSet := make([]*peerNode, 0)
authorizedPeersSize := 4
var listeners []net.Listener
var endpoints []string
for i := 0; i < authorizedPeersSize; i++ {
ll, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
listeners = append(listeners, ll)
endpoint := ll.Addr().String()
endpoints = append(endpoints, endpoint)
}
defer func() {
for _, ll := range listeners {
ll.Close()
}
}()
authorizedPeers := map[string]struct{}{
endpoints[0]: {},
endpoints[1]: {},
endpoints[2]: {},
endpoints[3]: {},
}
blockPullPolicy := func(identity api.PeerIdentityType) error {
if _, isAuthorized := authorizedPeers[string(identity)]; isAuthorized {
return nil
}
return errors.New("Not authorized")
}
var bootPorts []int
for i := 0; i < bootstrapSetSize; i++ {
commit := newCommitter()
bootPeer, bootPort := newBootNode(i, commit, blockPullPolicy)
bootstrapSet = append(bootstrapSet, bootPeer)
bootPorts = append(bootPorts, bootPort)
}
defer func() {
for _, p := range bootstrapSet {
p.shutdown()
}
}()
msgCount := 5
for i := 1; i <= msgCount; i++ {
rawblock := pcomm.NewBlock(uint64(i), []byte{})
if b, err := pb.Marshal(rawblock); err == nil {
payload := &proto.Payload{
SeqNum: uint64(i),
Data: b,
}
bootstrapSet[0].s.AddPayload(payload)
} else {
t.Fail()
}
}
standardPeerSetSize := 10
peersSet := make([]*peerNode, 0)
for i := 0; i < standardPeerSetSize; i++ {
commit := newCommitter()
peersSet = append(peersSet, newPeerNode(bootstrapSetSize+i, commit, blockPullPolicy, bootPorts...))
}
defer func() {
for _, p := range peersSet {
p.shutdown()
}
}()
waitUntilTrueOrTimeout(t, func() bool {
for _, p := range peersSet {
if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standardPeerSetSize-1 {
t.Log("Peer discovery has not finished yet")
return false
}
}
t.Log("All peer discovered each other!!!")
return true
}, 30*time.Second)
t.Log("Waiting for all blocks to arrive.")
waitUntilTrueOrTimeout(t, func() bool {
t.Log("Trying to see all authorized peers get all blocks, and all non-authorized didn't")
for _, p := range peersSet {
height, err := p.commit.LedgerHeight()
id := fmt.Sprintf("127.0.0.1:%d", p.port)
if _, isAuthorized := authorizedPeers[id]; isAuthorized {
if height != uint64(msgCount+1) || err != nil {
return false
}
} else {
if err == nil && height > 1 {
assert.Fail(t, "Peer", id, "got message but isn't authorized! Height:", height)
}
}
}
t.Log("All peers have same ledger height!!!")
return true
}, 60*time.Second)
}
func TestNewGossipStateProvider_SendingManyMessages(t *testing.T) {
t.Parallel()
bootstrapSetSize := 5
bootstrapSet := make([]*peerNode, 0)
var bootPorts []int
for i := 0; i < bootstrapSetSize; i++ {
commit := newCommitter()
bootPeer, bootPort := newBootNode(i, commit, noopPeerIdentityAcceptor)
bootstrapSet = append(bootstrapSet, bootPeer)
bootPorts = append(bootPorts, bootPort)
}
defer func() {
for _, p := range bootstrapSet {
p.shutdown()
}
}()
msgCount := 10
for i := 1; i <= msgCount; i++ {
rawblock := pcomm.NewBlock(uint64(i), []byte{})
if b, err := pb.Marshal(rawblock); err == nil {
payload := &proto.Payload{
SeqNum: uint64(i),
Data: b,
}
bootstrapSet[0].s.AddPayload(payload)
} else {
t.Fail()
}
}
standartPeersSize := 10
peersSet := make([]*peerNode, 0)
for i := 0; i < standartPeersSize; i++ {
commit := newCommitter()
peersSet = append(peersSet, newPeerNode(bootstrapSetSize+i, commit, noopPeerIdentityAcceptor, bootPorts...))
}
defer func() {
for _, p := range peersSet {
p.shutdown()
}
}()
waitUntilTrueOrTimeout(t, func() bool {
for _, p := range peersSet {
if len(p.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != bootstrapSetSize+standartPeersSize-1 {
t.Log("Peer discovery has not finished yet")
return false
}
}
t.Log("All peer discovered each other!!!")
return true
}, 30*time.Second)
t.Log("Waiting for all blocks to arrive.")
waitUntilTrueOrTimeout(t, func() bool {
t.Log("Trying to see all peers get all blocks")
for _, p := range peersSet {
height, err := p.commit.LedgerHeight()
if height != uint64(msgCount+1) || err != nil {
return false
}
}
t.Log("All peers have same ledger height!!!")
return true
}, 60*time.Second)
}
func TestGossipStateProvider_TestStateMessages(t *testing.T) {
t.Parallel()
bootPeer, bootPort := newBootNode(0, newCommitter(), noopPeerIdentityAcceptor)
defer bootPeer.shutdown()
peer := newPeerNode(1, newCommitter(), noopPeerIdentityAcceptor, bootPort)
defer peer.shutdown()
naiveStateMsgPredicate := func(message interface{}) bool {
return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
}
_, bootCh := bootPeer.g.Accept(naiveStateMsgPredicate, true)
_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
msg := <-bootCh
t.Log("Bootstrap node got message, ", msg)
assert.True(t, msg.GetGossipMessage().GetStateRequest() != nil)
msg.Respond(&proto.GossipMessage{
Content: &proto.GossipMessage_StateResponse{StateResponse: &proto.RemoteStateResponse{Payloads: nil}},
})
wg.Done()
}()
go func() {
msg := <-peerCh
t.Log("Peer node got an answer, ", msg)
assert.True(t, msg.GetGossipMessage().GetStateResponse() != nil)
wg.Done()
}()
readyCh := make(chan struct{})
go func() {
wg.Wait()
readyCh <- struct{}{}
}()
chainID := common.ChainID(util.GetTestChainID())
waitUntilTrueOrTimeout(t, func() bool {
return len(peer.g.PeersOfChannel(chainID)) == 1
}, 30*time.Second)
t.Log("Sending gossip message with remote state request")
peer.g.Send(&proto.GossipMessage{
Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{StartSeqNum: 0, EndSeqNum: 1}},
}, &comm.RemotePeer{Endpoint: peer.g.PeersOfChannel(chainID)[0].Endpoint, PKIID: peer.g.PeersOfChannel(chainID)[0].PKIid})
t.Log("Waiting until peers exchange messages")
select {
case <-readyCh:
{
t.Log("Done!!!")
}
case <-time.After(time.Duration(10) * time.Second):
{
t.Fail()
}
}
}
// Start one bootstrap peer and submit defAntiEntropyBatchSize + 5 messages into the
// local ledger, then spawn a new peer that waits for the anti-entropy procedure to
// fetch the missing blocks. Since state transfer messages are now batched, it is
// expected to see _exactly_ two messages with a state transfer response.
func TestNewGossipStateProvider_BatchingOfStateRequest(t *testing.T) {
t.Parallel()
bootPeer, bootPort := newBootNode(0, newCommitter(), noopPeerIdentityAcceptor)
defer bootPeer.shutdown()
msgCount := DefAntiEntropyBatchSize + 5
expectedMessagesCnt := 2
for i := 1; i <= msgCount; i++ {
rawblock := pcomm.NewBlock(uint64(i), []byte{})
if b, err := pb.Marshal(rawblock); err == nil {
payload := &proto.Payload{
SeqNum: uint64(i),
Data: b,
}
bootPeer.s.AddPayload(payload)
} else {
t.Fail()
}
}
peer := newPeerNode(1, newCommitter(), noopPeerIdentityAcceptor, bootPort)
defer peer.shutdown()
naiveStateMsgPredicate := func(message interface{}) bool {
return message.(proto.ReceivedMessage).GetGossipMessage().IsRemoteStateMessage()
}
_, peerCh := peer.g.Accept(naiveStateMsgPredicate, true)
messageCh := make(chan struct{})
stopWaiting := make(chan struct{})
	// The number of submitted messages is defAntiEntropyBatchSize + 5, therefore the
	// expected number of batches is expectedMessagesCnt = 2. The following goroutine
	// makes sure it receives the expected number of messages and signals success
	// so the test can continue.
go func(expected int) {
cnt := 0
for cnt < expected {
select {
case <-peerCh:
{
cnt++
}
case <-stopWaiting:
{
return
}
}
}
messageCh <- struct{}{}
}(expectedMessagesCnt)
	// Wait for the message indicating that the expected number of message batches was
	// received; otherwise time out after 2 * defAntiEntropyInterval + 1 second.
select {
case <-messageCh:
{
			// Once we get the message indicating that two batches were received,
			// make sure the payloads were indeed committed.
waitUntilTrueOrTimeout(t, func() bool {
if len(peer.g.PeersOfChannel(common.ChainID(util.GetTestChainID()))) != 1 {
t.Log("Peer discovery has not finished yet")
return false
}
t.Log("All peer discovered each other!!!")
return true
}, 30*time.Second)
t.Log("Waiting for all blocks to arrive.")
waitUntilTrueOrTimeout(t, func() bool {
t.Log("Trying to see all peers get all blocks")
height, err := peer.commit.LedgerHeight()
if height != uint64(msgCount+1) || err != nil {
return false
}
t.Log("All peers have same ledger height!!!")
return true
}, 60*time.Second)
}
case <-time.After(DefAntiEntropyInterval*2 + time.Second*1):
{
close(stopWaiting)
t.Fatal("Expected to receive two batches with missing payloads")
}
}
}
// coordinatorMock is a mock structure implementing the coordinator interface,
// used to simulate the coordinator flow during the test
type coordinatorMock struct {
committer.Committer
mock.Mock
}
func (mock *coordinatorMock) GetPvtDataAndBlockByNum(seqNum uint64, _ pcomm.SignedData) (*pcomm.Block, gutil.PvtDataCollections, error) {
args := mock.Called(seqNum)
return args.Get(0).(*pcomm.Block), args.Get(1).(gutil.PvtDataCollections), args.Error(2)
}
func (mock *coordinatorMock) GetBlockByNum(seqNum uint64) (*pcomm.Block, error) {
args := mock.Called(seqNum)
return args.Get(0).(*pcomm.Block), args.Error(1)
}
func (mock *coordinatorMock) StoreBlock(block *pcomm.Block, data gutil.PvtDataCollections) error {
args := mock.Called(block, data)
return args.Error(1)
}
func (mock *coordinatorMock) LedgerHeight() (uint64, error) {
args := mock.Called()
return args.Get(0).(uint64), args.Error(1)
}
func (mock *coordinatorMock) Close() {
mock.Called()
}
// StorePvtData persists private data into the transient store
func (mock *coordinatorMock) StorePvtData(txid string, privData *transientstore2.TxPvtReadWriteSetWithConfigInfo, blkHeight uint64) error {
return mock.Called().Error(0)
}
type receivedMessageMock struct {
mock.Mock
}
// Ack returns to the sender an acknowledgement for the message
func (mock *receivedMessageMock) Ack(err error) {
}
func (mock *receivedMessageMock) Respond(msg *proto.GossipMessage) {
mock.Called(msg)
}
func (mock *receivedMessageMock) GetGossipMessage() *proto.SignedGossipMessage {
args := mock.Called()
return args.Get(0).(*proto.SignedGossipMessage)
}
func (mock *receivedMessageMock) GetSourceEnvelope() *proto.Envelope {
args := mock.Called()
return args.Get(0).(*proto.Envelope)
}
func (mock *receivedMessageMock) GetConnectionInfo() *proto.ConnectionInfo {
args := mock.Called()
return args.Get(0).(*proto.ConnectionInfo)
}
type testData struct {
block *pcomm.Block
pvtData gutil.PvtDataCollections
}
func TestTransferOfPrivateRWSet(t *testing.T) {
t.Parallel()
chainID := "testChainID"
// First gossip instance
g := &mocks.GossipMock{}
coord1 := new(coordinatorMock)
gossipChannel := make(chan *proto.GossipMessage)
commChannel := make(chan proto.ReceivedMessage)
gossipChannelFactory := func(ch chan *proto.GossipMessage) <-chan *proto.GossipMessage {
return ch
}
g.On("Accept", mock.Anything, false).Return(gossipChannelFactory(gossipChannel), nil)
g.On("Accept", mock.Anything, true).Return(nil, commChannel)
g.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
g.On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{})
g.On("Close")
coord1.On("LedgerHeight", mock.Anything).Return(uint64(5), nil)
var data = map[uint64]*testData{
uint64(2): {
block: &pcomm.Block{
Header: &pcomm.BlockHeader{
Number: 2,
DataHash: []byte{0, 1, 1, 1},
PreviousHash: []byte{0, 0, 0, 1},
},
Data: &pcomm.BlockData{
Data: [][]byte{{1}, {2}, {3}},
},
},
pvtData: gutil.PvtDataCollections{
{
SeqInBlock: uint64(0),
WriteSet: &rwset.TxPvtReadWriteSet{
DataModel: rwset.TxReadWriteSet_KV,
NsPvtRwset: []*rwset.NsPvtReadWriteSet{
{
Namespace: "myCC:v1",
CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
{
CollectionName: "mysecrectCollection",
Rwset: []byte{1, 2, 3, 4, 5},
},
},
},
},
},
},
},
},
uint64(3): {
block: &pcomm.Block{
Header: &pcomm.BlockHeader{
Number: 3,
DataHash: []byte{1, 1, 1, 1},
PreviousHash: []byte{0, 1, 1, 1},
},
Data: &pcomm.BlockData{
Data: [][]byte{{4}, {5}, {6}},
},
},
pvtData: gutil.PvtDataCollections{
{
SeqInBlock: uint64(2),
WriteSet: &rwset.TxPvtReadWriteSet{
DataModel: rwset.TxReadWriteSet_KV,
NsPvtRwset: []*rwset.NsPvtReadWriteSet{
{
Namespace: "otherCC:v1",
CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
{
CollectionName: "topClassified",
Rwset: []byte{0, 0, 0, 4, 2},
},
},
},
},
},
},
},
},
}
for seqNum, each := range data {
coord1.On("GetPvtDataAndBlockByNum", seqNum).Return(each.block, each.pvtData, nil /* no error*/)
}
coord1.On("Close")
	servicesAdapter := &ServicesMediator{GossipAdapter: g, MCSAdapter: &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}}
stateMetrics := metrics.NewGossipMetrics(&disabled.Provider{}).StateMetrics
	st := NewGossipStateProvider(chainID, servicesAdapter, coord1, stateMetrics, config)
defer st.Stop()
// Mocked state request message
requestMsg := new(receivedMessageMock)
// Get state request message, blocks [2...3]
requestGossipMsg := &proto.GossipMessage{
// Copy nonce field from the request, so it will be possible to match response
Nonce: 1,
Tag: proto.GossipMessage_CHAN_OR_ORG,
Channel: []byte(chainID),
Content: &proto.GossipMessage_StateRequest{StateRequest: &proto.RemoteStateRequest{
StartSeqNum: 2,
EndSeqNum: 3,
}},
}
msg, _ := requestGossipMsg.NoopSign()
requestMsg.On("GetGossipMessage").Return(msg)
requestMsg.On("GetConnectionInfo").Return(&proto.ConnectionInfo{
Auth: &proto.AuthInfo{},
})
// Channel to send responses back
responseChannel := make(chan proto.ReceivedMessage)
defer close(responseChannel)
requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
// Get gossip response to respond back on state request
response := args.Get(0).(*proto.GossipMessage)
// Wrap it up into received response
receivedMsg := new(receivedMessageMock)
// Create sign response
msg, _ := response.NoopSign()
// Mock to respond
receivedMsg.On("GetGossipMessage").Return(msg)
// Send response
responseChannel <- receivedMsg
})
// Send request message via communication channel into state transfer
commChannel <- requestMsg
// State transfer request should result in state response back
response := <-responseChannel
// Start the assertion section
stateResponse := response.GetGossipMessage().GetStateResponse()
assertion := assert.New(t)
// Nonce should be equal to Nonce of the request
assertion.Equal(response.GetGossipMessage().Nonce, uint64(1))
	// Payload should not be nil
assertion.NotNil(stateResponse)
assertion.NotNil(stateResponse.Payloads)
// Exactly two messages expected
assertion.Equal(len(stateResponse.Payloads), 2)
	// Assert we have all the data and that it matches what we expected
for _, each := range stateResponse.Payloads {
block := &pcomm.Block{}
err := pb.Unmarshal(each.Data, block)
assertion.NoError(err)
assertion.NotNil(block.Header)
testBlock, ok := data[block.Header.Number]
assertion.True(ok)
for i, d := range testBlock.block.Data.Data {
assertion.True(bytes.Equal(d, block.Data.Data[i]))
}
for i, p := range testBlock.pvtData {
pvtDataPayload := &proto.PvtDataPayload{}
err := pb.Unmarshal(each.PrivateData[i], pvtDataPayload)
assertion.NoError(err)
pvtRWSet := &rwset.TxPvtReadWriteSet{}
err = pb.Unmarshal(pvtDataPayload.Payload, pvtRWSet)
assertion.NoError(err)
assertion.True(pb.Equal(p.WriteSet, pvtRWSet))
}
}
}
type testPeer struct {
*mocks.GossipMock
id string
gossipChannel chan *proto.GossipMessage
commChannel chan proto.ReceivedMessage
coord *coordinatorMock
}
func (t testPeer) Gossip() <-chan *proto.GossipMessage {
return t.gossipChannel
}
func (t testPeer) Comm() chan proto.ReceivedMessage {
return t.commChannel
}
var peers = map[string]testPeer{
"peer1": {
id: "peer1",
gossipChannel: make(chan *proto.GossipMessage),
commChannel: make(chan proto.ReceivedMessage),
GossipMock: &mocks.GossipMock{},
coord: new(coordinatorMock),
},
"peer2": {
id: "peer2",
gossipChannel: make(chan *proto.GossipMessage),
commChannel: make(chan proto.ReceivedMessage),
GossipMock: &mocks.GossipMock{},
coord: new(coordinatorMock),
},
}
func TestTransferOfPvtDataBetweenPeers(t *testing.T) {
/*
	   This test covers a pretty basic scenario: there are two peers, "peer1" and "peer2",
	   where peer2 is missing a few blocks in its ledger and therefore asks to replicate
	   those blocks from the first peer.
	   The test checks that a block from one peer is replicated to the second one and
	   has identical content.
*/
t.Parallel()
chainID := "testChainID"
// Initialize peer
for _, peer := range peers {
peer.On("Accept", mock.Anything, false).Return(peer.Gossip(), nil)
peer.On("Accept", mock.Anything, true).
Return(nil, peer.Comm()).
Once().
On("Accept", mock.Anything, true).
Return(nil, make(chan proto.ReceivedMessage))
peer.On("UpdateChannelMetadata", mock.Anything, mock.Anything)
peer.coord.On("Close")
peer.On("Close")
}
	// First peer is going to have a more advanced ledger
peers["peer1"].coord.On("LedgerHeight", mock.Anything).Return(uint64(3), nil)
	// Second peer has a gap of one block, hence it will have to replicate it from the previous one
peers["peer2"].coord.On("LedgerHeight", mock.Anything).Return(uint64(2), nil)
peers["peer1"].coord.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
peers["peer2"].coord.On("DoesPvtDataInfoExistInLedger", mock.Anything).Return(false, nil)
peers["peer1"].coord.On("GetPvtDataAndBlockByNum", uint64(2)).Return(&pcomm.Block{
Header: &pcomm.BlockHeader{
Number: 2,
DataHash: []byte{0, 0, 0, 1},
PreviousHash: []byte{0, 1, 1, 1},
},
Data: &pcomm.BlockData{
Data: [][]byte{{4}, {5}, {6}},
},
}, gutil.PvtDataCollections{&ledger.TxPvtData{
SeqInBlock: uint64(1),
WriteSet: &rwset.TxPvtReadWriteSet{
DataModel: rwset.TxReadWriteSet_KV,
NsPvtRwset: []*rwset.NsPvtReadWriteSet{
{
Namespace: "myCC:v1",
CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
{
CollectionName: "mysecrectCollection",
Rwset: []byte{1, 2, 3, 4, 5},
},
},
},
},
},
}}, nil)
// Return membership of the peers
member2 := discovery.NetworkMember{
PKIid: common.PKIidType([]byte{2}),
Endpoint: "peer2:7051",
InternalEndpoint: "peer2:7051",
Properties: &proto.Properties{
LedgerHeight: 2,
},
}
member1 := discovery.NetworkMember{
PKIid: common.PKIidType([]byte{1}),
Endpoint: "peer1:7051",
InternalEndpoint: "peer1:7051",
Properties: &proto.Properties{
LedgerHeight: 3,
},
}
peers["peer1"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member2})
peers["peer2"].On("PeersOfChannel", mock.Anything).Return([]discovery.NetworkMember{member1})
peers["peer2"].On("Send", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
request := args.Get(0).(*proto.GossipMessage)
requestMsg := new(receivedMessageMock)
msg, _ := request.NoopSign()
requestMsg.On("GetGossipMessage").Return(msg)
requestMsg.On("GetConnectionInfo").Return(&proto.ConnectionInfo{
Auth: &proto.AuthInfo{},
})
requestMsg.On("Respond", mock.Anything).Run(func(args mock.Arguments) {
response := args.Get(0).(*proto.GossipMessage)
receivedMsg := new(receivedMessageMock)
msg, _ := response.NoopSign()
receivedMsg.On("GetGossipMessage").Return(msg)
// Send response back to the peer
peers["peer2"].commChannel <- receivedMsg
})
peers["peer1"].commChannel <- requestMsg
})
wg := sync.WaitGroup{}
wg.Add(1)
peers["peer2"].coord.On("StoreBlock", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
wg.Done() // Done once second peer hits commit of the block
}).Return([]string{}, nil) // No pvt data to complete and no error
cryptoService := &cryptoServiceMock{acceptor: noopPeerIdentityAcceptor}
stateMetrics := metrics.NewGossipMetrics(&disabled.Provider{}).StateMetrics
mediator := &ServicesMediator{GossipAdapter: peers["peer1"], MCSAdapter: cryptoService}
peer1State := NewGossipStateProvider(chainID, mediator, peers["peer1"].coord, stateMetrics, config)
defer peer1State.Stop()
mediator = &ServicesMediator{GossipAdapter: peers["peer2"], MCSAdapter: cryptoService}
peer2State := NewGossipStateProvider(chainID, mediator, peers["peer2"].coord, stateMetrics, config)
defer peer2State.Stop()
// Make sure state was replicated
done := make(chan struct{})
go func() {
wg.Wait()
done <- struct{}{}
}()
select {
case <-done:
break
case <-time.After(30 * time.Second):
t.Fail()
}
}
func TestStateRequestValidator(t *testing.T) {
validator := &stateRequestValidator{}
err := validator.validate(&proto.RemoteStateRequest{
StartSeqNum: 10,
EndSeqNum: 5,
}, DefAntiEntropyBatchSize)
assert.Contains(t, err.Error(), "Invalid sequence interval [10...5).")
assert.Error(t, err)
err = validator.validate(&proto.RemoteStateRequest{
StartSeqNum: 10,
EndSeqNum: 30,
}, DefAntiEntropyBatchSize)
assert.Contains(t, err.Error(), "Requesting blocks range [10-30) greater than configured")
assert.Error(t, err)
err = validator.validate(&proto.RemoteStateRequest{
StartSeqNum: 10,
EndSeqNum: 20,
}, DefAntiEntropyBatchSize)
assert.NoError(t, err)
}
func waitUntilTrueOrTimeout(t *testing.T, predicate func() bool, timeout time.Duration) {
ch := make(chan struct{})
go func() {
t.Log("Started to spin off, until predicate will be satisfied.")
for !predicate() {
time.Sleep(1 * time.Second)
}
ch <- struct{}{}
t.Log("Done.")
}()
select {
case <-ch:
break
case <-time.After(timeout):
t.Fatal("Timeout has expired")
break
}
t.Log("Stop waiting until timeout or true")
}
func assertLogged(t *testing.T, r *floggingtest.Recorder, msg string) {
observed := func() bool { return len(r.MessagesContaining(msg)) > 0 }
waitUntilTrueOrTimeout(t, observed, 30*time.Second)
}
|
{
var peers []string
for _, port := range ports {
peers = append(peers, fmt.Sprintf("127.0.0.1:%d", port))
}
return peers
}
|
BotTraining.js
|
import React, {Component} from "react";
import {connect} from "react-redux";
import closeLogo from "../assets/images/closeIcon.svg";
import {statusUpdateToLoginClose,statusUpdateToEmptyForm, saveIntentContent, statusUpdateToLoading} from "../actions/authen";
import {addIntentTextbox, removeIntentTextbox, handleIntentChange} from "../actions/authen";
import {v4} from "node-uuid";
import AppendIntentStatus from "./AppendIntentStatus";
class BotTraining extends Component {
constructor(props) {
super(props);
        this.state = {intent: "", intent_Followup: "", intent_Followup_Response: "", textboxDiv_Id: '', followupIntentChange: {}, followupResponseIntentChange: {}, followupIntentContent: {}, validData: true};
this.handleFollowResponseChange = this.handleFollowResponseChange.bind(this);
this.handleIntentChange = this.handleIntentChange.bind(this);
this.handleIntentFollowChange = this.handleIntentFollowChange.bind(this);
this.handleSubmitIntent = this.handleSubmitIntent.bind(this);
this.handelAddIntentTextbox = this.handelAddIntentTextbox.bind(this);
this.saveTrainIntents = this.saveTrainIntents.bind(this);
this.handelCloseIntentDiv = this.handelCloseIntentDiv.bind(this);
};
handleIntentChange(event) {
this.setState({intent: event.target.value});
}
handleIntentFollowChange(event) {
this.setState({intent_Followup: event.target.value});
}
handleFollowResponseChange(event) {
this.setState({intent_Followup_Response: event.target.value});
}
handleSubmitIntent(event){
event.preventDefault();
}
    handelAddIntentTextbox(event){
        event.preventDefault();
        // Generate the id locally and update state via setState (never mutate this.state directly)
        const textboxDiv_Id = v4();
        this.setState({textboxDiv_Id: textboxDiv_Id});
        this.props.addIntentTextbox(textboxDiv_Id);
    }
handelCloseIntentDiv(event){
this.props.statusUpdateToLoginClose();
this.props.statusUpdateToEmptyForm();
}
    handleFollowupIntentChange(textbox_id, event) {
        // Copy the map before updating so state is not mutated in place
        const followupIntentChange = {...this.state.followupIntentChange};
        followupIntentChange[textbox_id + '1'] = event.target.value;
        this.setState({followupIntentChange: followupIntentChange});
    }
    handleFollowupResponseIntentChange(textbox_id, event) {
        const followupResponseIntentChange = {...this.state.followupResponseIntentChange};
        followupResponseIntentChange[textbox_id + '2'] = event.target.value;
        this.setState({followupResponseIntentChange: followupResponseIntentChange});
    }
    saveTrainIntents(event){
        /*this.props.statusUpdateToLoading();*/
        const idx_Array = [];
        const followupIntentChange_content = [];
        const followupResponseIntentChange_content = [];
        followupIntentChange_content.push(this.state.intent_Followup);
        followupResponseIntentChange_content.push(this.state.intent_Followup_Response);
        this.props.textboxAppend.forEach(textbox => idx_Array.push(textbox.idx));
        for (let i = 0; i < idx_Array.length; i++) {
            followupIntentChange_content.push(this.state.followupIntentChange[idx_Array[i] + '1']);
            followupResponseIntentChange_content.push(this.state.followupResponseIntentChange[idx_Array[i] + '2']);
        }
        const followupIntentContent = {
            intentValue: this.state.intent,
            followupIntentChange_content: followupIntentChange_content,
            followupResponseIntentChange_content: followupResponseIntentChange_content
        };
        // Valid only if the main intent and every followup/response pair are filled in
        let validData = true;
        if (!this.state.intent || !this.state.intent_Followup || !this.state.intent_Followup_Response) {
            validData = false;
        }
        for (let i = 0; i < idx_Array.length; i++) {
            if (!this.state.followupIntentChange[idx_Array[i] + '1'] || !this.state.followupResponseIntentChange[idx_Array[i] + '2']) {
                validData = false;
            }
        }
        this.setState({followupIntentContent: followupIntentContent, validData: validData});
        if (validData) {
            this.props.saveIntentContent(followupIntentContent);
            this.setState({intent: '', intent_Followup: '', intent_Followup_Response: ''});
            this.props.statusUpdateToEmptyForm();
        }
    }
render() {
if (!this.props.authenticated && this.props.isformBOTStatus) {
return (
<div className="sidenavdiv-container ">
<div className="row justify-content-start">
<div className="col-8">
<div className="sidenavdiv-header">
<div className="d-flex justify-content-between">
<p className="h6 text-white">
BOT Action Training
</p>
<img className="figure-img img-fluid rounded sidenavdiv-header__img cursor"
onClick={this.handelCloseIntentDiv} src={closeLogo}/>
</div>
</div>
</div>
</div>
<div className="row justify-content-start">
<div className="col-8">
<div className="sidenavdiv-body intentscroll">
<form onSubmit={this.handleSubmitIntent}>
<div className="form-group px-3 pt-3 ">
<AppendIntentStatus/>
|
placeholder="Enter Intent" required
value={this.state.intent} onChange={this.handleIntentChange}
/>
</div>
<div className=" px-3 position-relative allignintentdiv">
<label htmlFor="exampleInputPassword1">Intent Followup</label>
<input type="text" className="form-control form-control-lg form-padding poptextbox"
id="exampleInputPassword1" placeholder="Intent Followup" required
value={this.state.intent_Followup} onChange={this.handleIntentFollowChange}
/>
<label htmlFor="exampleInputPassword1"className={"popuptextalighlabel"}>Intent Followup Response</label>
<input type="text" className="popuptextalightextbox form-control form-control-lg form-padding poptextbox"
id="exampleInputPassword1" placeholder="Intent Followup Response" required
value={this.state.intent_Followup_Response} onChange={this.handleFollowResponseChange}
/>
</div>
<div>{this.props.textboxAppend.map(textbox =>(
<div className=" px-3 position-relative allignintentdiv" key={textbox.idx}>
<label htmlFor="exampleInputPassword1">Intent Followup</label>
<input type="text" className="form-control form-control-lg form-padding poptextbox"
id={textbox.idx+'1'} placeholder="Intent Followup" required
                                           value={this.state.followupIntentChange[textbox.idx + '1'] || ''} onChange={this.handleFollowupIntentChange.bind(this, textbox.idx)}
/>
<label htmlFor="exampleInputPassword1"className={"popuptextalighlabel"}>Intent Followup Response</label>
<input type="text" className="popuptextalightextbox form-control form-control-lg form-padding poptextbox"
id={textbox.idx+'2'} placeholder="Intent Followup Response" required
                                           value={this.state.followupResponseIntentChange[textbox.idx + '2'] || ''} onChange={this.handleFollowupResponseIntentChange.bind(this, textbox.idx)}
/>
<button type="button" onClick={() => this.props.removeIntentTextbox(textbox.idx)} className="removetextintent cursor">X</button>
</div>
))}</div>
<div className="d-flex justify-content-between px-5 pb-3 position-relative margin_div">
<button type="button" className="p-2 btn btn-info mr-1" onClick={this.handelAddIntentTextbox}>Add More</button>
<button type="submit" className="p-2 btn btn-info width_button" onClick={this.saveTrainIntents}>Save</button>
<button type="submit" className="p-2 btn btn-info width_button" onClick={this.handelCloseIntentDiv}>Cancel</button>
</div>
</form>
</div>
</div>
</div>
</div>
);
}
else {
return (
null
);
}
}
}
const mapStateToProps = (state) => {
return {
isformBOTStatus: state.status.formBotStatus,
textboxAppend: state.status_append
};
};
const mapDispatchToProps = (dispatch) => {
return {
statusUpdateToLoginClose: ()=>dispatch(statusUpdateToLoginClose()),
statusUpdateToEmptyForm: ()=>dispatch(statusUpdateToEmptyForm()),
addIntentTextbox: (textboxDiv_Id)=>dispatch(addIntentTextbox(textboxDiv_Id)),
removeIntentTextbox: (textbox_id)=>dispatch(removeIntentTextbox(textbox_id)),
saveIntentContent: (followupIntentContent)=>dispatch(saveIntentContent(followupIntentContent)),
statusUpdateToLoading: ()=>dispatch(statusUpdateToLoading())
};
};
export default connect(mapStateToProps, mapDispatchToProps)(BotTraining);
|
<label htmlFor="exampleInputEmail1">Intent</label>
<input type="text" className="popuptextalighfull form-control form-control-lg form-padding"
id="exampleInputEmail1" aria-describedby="emailHelp"
|
models.py
|
from django.db import models
class Mission(models.Model):
name = models.CharField(max_length=200)
start_date = models.DateTimeField('date discovered')
def __unicode__(self):
return self.name
class Planet(models.Model):
name = models.CharField(max_length=200)
discovery_date = models.DateTimeField('date discovered')
mass = models.FloatField()
radius = models.FloatField()
    mission = models.ForeignKey(Mission)
|
def __unicode__(self):
return self.name
|
|
idempotency.rs
|
use super::types::ApiResponse;
use bytes::Bytes;
use futures::executor::spawn;
use futures::{
future::{err, ok, Either},
Future,
};
use http::StatusCode;
use interledger_http::error::*;
use log::error;
#[derive(Debug, Clone, PartialEq)]
pub struct IdempotentData {
pub status: StatusCode,
pub body: Bytes,
pub input_hash: [u8; 32],
}
impl IdempotentData {
pub fn new(status: StatusCode, body: Bytes, input_hash: [u8; 32]) -> Self {
Self {
status,
body,
input_hash,
}
}
}
pub trait IdempotentStore {
/// Returns the API response that was saved when the idempotency key was used
/// Also returns a hash of the input data which resulted in the response
fn load_idempotent_data(
&self,
idempotency_key: String,
) -> Box<dyn Future<Item = Option<IdempotentData>, Error = ()> + Send>;
    /// Saves the data that was passed along with the API request for later.
    /// The store MUST also save a hash of the input, so that it errors out on
    /// requests that reuse the same idempotency key with different input.
fn save_idempotent_data(
&self,
idempotency_key: String,
input_hash: [u8; 32],
status_code: StatusCode,
data: Bytes,
) -> Box<dyn Future<Item = (), Error = ()> + Send>;
}
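// Illustrative sketch (not part of the original module): a minimal in-memory
// `IdempotentStore` backed by a mutex-guarded HashMap, written in the same
// futures-0.1 style as the trait above. A real store would persist this data.
#[derive(Clone, Default)]
pub struct InMemoryIdempotentStore {
    data: std::sync::Arc<std::sync::Mutex<std::collections::HashMap<String, IdempotentData>>>,
}
impl IdempotentStore for InMemoryIdempotentStore {
    fn load_idempotent_data(
        &self,
        idempotency_key: String,
    ) -> Box<dyn Future<Item = Option<IdempotentData>, Error = ()> + Send> {
        // Look the key up synchronously and wrap the result in an already-resolved future
        Box::new(ok(self.data.lock().unwrap().get(&idempotency_key).cloned()))
    }
    fn save_idempotent_data(
        &self,
        idempotency_key: String,
        input_hash: [u8; 32],
        status_code: StatusCode,
        data: Bytes,
    ) -> Box<dyn Future<Item = (), Error = ()> + Send> {
        // Store the response together with the hash of the input that produced it
        self.data.lock().unwrap().insert(
            idempotency_key,
            IdempotentData::new(status_code, data, input_hash),
        );
        Box::new(ok(()))
    }
}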
// Helper function that returns any idempotent data that corresponds to a
// provided idempotency key. It fails if the hash of the input that
// generated the idempotent data does not match the hash of the provided input.
fn check_idempotency<S>(
store: S,
idempotency_key: String,
input_hash: [u8; 32],
) -> impl Future<Item = Option<(StatusCode, Bytes)>, Error = ApiError>
where
S: IdempotentStore + Clone + Send + Sync + 'static,
{
store
.load_idempotent_data(idempotency_key.clone())
.map_err(move |_| IDEMPOTENT_STORE_CALL_ERROR.clone())
.and_then(move |ret: Option<IdempotentData>| {
if let Some(ret) = ret {
            // Check if the hash (ret.input_hash) of the loaded idempotent data matches the hash
            // of the provided input data. If not, we should error out since
            // the caller provided an idempotency key that was used for a
|
if ret.input_hash == input_hash {
Ok(Some((ret.status, ret.body)))
} else {
Ok(Some((
StatusCode::from_u16(409).unwrap(),
Bytes::from(IDEMPOTENCY_CONFLICT_ERR),
)))
}
} else {
Ok(None)
}
})
}
// make_idempotent_call takes a function instead of direct arguments so that we
// can reuse it for both the messages and the settlements calls
pub fn make_idempotent_call<S, F>(
store: S,
non_idempotent_function: F,
input_hash: [u8; 32],
idempotency_key: Option<String>,
// As per the spec, the success status code is independent of the
// implemented engine's functionality
status_code: StatusCode,
// The default value is used when the engine returns a default return type
default_return_value: Bytes,
) -> impl Future<Item = (StatusCode, Bytes), Error = ApiError>
where
F: FnOnce() -> Box<dyn Future<Item = ApiResponse, Error = ApiError> + Send>,
S: IdempotentStore + Clone + Send + Sync + 'static,
{
if let Some(idempotency_key) = idempotency_key {
        // If an idempotency key was provided, check idempotency. If the key
        // was not present or did not conflict with an existing key, perform
        // the call and save the idempotent return data
Either::A(
check_idempotency(store.clone(), idempotency_key.clone(), input_hash).and_then(
move |ret: Option<(StatusCode, Bytes)>| {
if let Some(ret) = ret {
if ret.0.is_success() {
Either::A(Either::A(ok((ret.0, ret.1))))
} else {
let err_msg = ApiErrorType {
r#type: &ProblemType::Default,
status: ret.0,
title: "Idempotency Error",
};
// if check_idempotency returns an error, then it
// has to be an idempotency error
let ret_error = ApiError::from_api_error_type(&err_msg)
.detail(String::from_utf8_lossy(&ret.1).to_string());
Either::A(Either::B(err(ret_error)))
}
} else {
Either::B(
non_idempotent_function().map_err({
let store = store.clone();
let idempotency_key = idempotency_key.clone();
move |ret: ApiError| {
let status_code = ret.status;
let data = Bytes::from(ret.detail.clone().unwrap_or_default());
spawn(store.save_idempotent_data(
idempotency_key,
input_hash,
status_code,
data,
).map_err(move |_| error!("Failed to connect to the store! The request will not be idempotent if retried.")));
ret
}})
.map(move |ret| {
let data = match ret {
ApiResponse::Default => default_return_value,
ApiResponse::Data(d) => d,
};
(status_code, data)
}).and_then(
move |ret: (StatusCode, Bytes)| {
store
.save_idempotent_data(
idempotency_key,
input_hash,
ret.0,
ret.1.clone(),
)
.map_err(move |_| {
error!("Failed to connect to the store! The request will not be idempotent if retried.");
IDEMPOTENT_STORE_CALL_ERROR.clone()
})
.and_then(move |_| Ok((ret.0, ret.1)))
},
),
)
}
},
),
)
} else {
// otherwise just make the call w/o any idempotency saves
Either::B(
non_idempotent_function()
.map(move |ret| {
let data = match ret {
ApiResponse::Default => default_return_value,
ApiResponse::Data(d) => d,
};
(status_code, data)
})
.and_then(move |ret: (StatusCode, Bytes)| Ok((ret.0, ret.1))),
)
}
}
|
// different input.
|
SalesReturnListApp.ts
|
/**
* @license
* Copyright Color-Coding Studio. All Rights Reserved.
*
* Use of this source code is governed by an Apache License, Version 2.0
* that can be found in the LICENSE file at http://www.apache.org/licenses/LICENSE-2.0
*/
namespace sales {
export namespace app {
        /** list application - sales return */
export class SalesReturnListA
|
ListApplication<ISalesReturnListView, bo.SalesReturn> {
            /** application identifier */
            static APPLICATION_ID: string = "d9abfa76-bb8f-4d4e-9bc3-1aa241c5d9e9";
            /** application name */
            static APPLICATION_NAME: string = "sales_app_salesreturn_list";
            /** business object code */
            static BUSINESS_OBJECT_CODE: string = bo.SalesReturn.BUSINESS_OBJECT_CODE;
            /** constructor */
constructor() {
super();
this.id = SalesReturnListApp.APPLICATION_ID;
this.name = SalesReturnListApp.APPLICATION_NAME;
this.boCode = SalesReturnListApp.BUSINESS_OBJECT_CODE;
this.description = ibas.i18n.prop(this.name);
}
            /** register view */
            protected registerView(): void {
                super.registerView();
                // other events
                this.view.editDataEvent = this.editData;
                this.view.deleteDataEvent = this.deleteData;
            }
            /** called after the view is shown */
            protected viewShowed(): void {
                // view finished loading
            }
            /** fetch data */
protected fetchData(criteria: ibas.ICriteria): void {
this.busy(true);
let that: this = this;
let boRepository: bo.BORepositorySales = new bo.BORepositorySales();
boRepository.fetchSalesReturn({
criteria: criteria,
onCompleted(opRslt: ibas.IOperationResult<bo.SalesReturn>): void {
try {
if (opRslt.resultCode !== 0) {
throw new Error(opRslt.message);
}
if (opRslt.resultObjects.length === 0) {
that.proceeding(ibas.emMessageType.INFORMATION, ibas.i18n.prop("shell_data_fetched_none"));
}
that.view.showData(opRslt.resultObjects);
that.busy(false);
} catch (error) {
that.messages(error);
}
}
});
this.proceeding(ibas.emMessageType.INFORMATION, ibas.i18n.prop("shell_fetching_data"));
}
            /** create new data */
protected newData(): void {
let app: SalesReturnEditApp = new SalesReturnEditApp();
app.navigation = this.navigation;
app.viewShower = this.viewShower;
app.run();
}
            /** view data; parameter: the target data */
            protected viewData(data: bo.SalesReturn): void {
                // check the target data
if (ibas.objects.isNull(data)) {
this.messages(ibas.emMessageType.WARNING, ibas.i18n.prop("shell_please_chooose_data",
ibas.i18n.prop("shell_data_view")
));
return;
}
let app: SalesReturnViewApp = new SalesReturnViewApp();
app.navigation = this.navigation;
app.viewShower = this.viewShower;
app.run(data);
}
            /** edit data; parameter: the target data */
            protected editData(data: bo.SalesReturn): void {
                // check the target data
if (ibas.objects.isNull(data)) {
this.messages(ibas.emMessageType.WARNING, ibas.i18n.prop("shell_please_chooose_data",
ibas.i18n.prop("shell_data_edit")
));
return;
}
let app: SalesReturnEditApp = new SalesReturnEditApp();
app.navigation = this.navigation;
app.viewShower = this.viewShower;
app.run(data);
}
            /** delete data; parameter: collection of target data */
            protected deleteData(data: bo.SalesReturn | bo.SalesReturn[]): void {
                // check the target data
if (ibas.objects.isNull(data)) {
this.messages(ibas.emMessageType.WARNING, ibas.i18n.prop("shell_please_chooose_data",
ibas.i18n.prop("shell_data_delete")
));
return;
}
let beDeleteds: ibas.ArrayList<bo.SalesReturn> = new ibas.ArrayList<bo.SalesReturn>();
if (data instanceof Array) {
for (let item of data) {
item.delete();
beDeleteds.add(item);
}
} else {
data.delete();
beDeleteds.add(data);
}
                // no objects were chosen for deletion
if (beDeleteds.length === 0) {
this.messages(ibas.emMessageType.WARNING, ibas.i18n.prop("shell_please_chooose_data",
ibas.i18n.prop("shell_data_delete")
));
return;
}
let that: this = this;
this.messages({
type: ibas.emMessageType.QUESTION,
title: ibas.i18n.prop(this.name),
message: ibas.i18n.prop("shell_whether_to_delete", beDeleteds.length),
actions: [ibas.emMessageAction.YES, ibas.emMessageAction.NO],
onCompleted(action: ibas.emMessageAction): void {
if (action === ibas.emMessageAction.YES) {
try {
let boRepository: bo.BORepositorySales = new bo.BORepositorySales();
let saveMethod: Function = function (beSaved: bo.SalesReturn): void {
boRepository.saveSalesReturn({
beSaved: beSaved,
onCompleted(opRslt: ibas.IOperationResult<bo.SalesReturn>): void {
try {
if (opRslt.resultCode !== 0) {
throw new Error(opRslt.message);
}
                                        // save the next item
let index: number = beDeleteds.indexOf(beSaved) + 1;
if (index > 0 && index < beDeleteds.length) {
saveMethod(beDeleteds[index]);
} else {
                                            // processing completed
that.busy(false);
that.messages(ibas.emMessageType.SUCCESS,
ibas.i18n.prop("shell_data_delete") + ibas.i18n.prop("shell_sucessful"));
}
} catch (error) {
that.messages(ibas.emMessageType.ERROR,
ibas.i18n.prop("shell_data_delete_error", beSaved, error.message));
}
}
});
that.proceeding(ibas.emMessageType.INFORMATION, ibas.i18n.prop("shell_data_deleting", beSaved));
};
that.busy(true);
                            // start saving
saveMethod(beDeleteds.firstOrDefault());
} catch (error) {
that.busy(false);
that.messages(error);
}
}
}
});
}
            /** get service contracts */
protected getServiceProxies(): ibas.IServiceProxy<ibas.IServiceContract>[] {
return [
new ibas.BOListServiceProxy({
data: this.view.getSelecteds(),
converter: new bo.DataConverter()
})
];
}
}
        /** view - sales return */
        export interface ISalesReturnListView extends ibas.IBOListView {
            /** edit data event; parameter: the object to edit */
            editDataEvent: Function;
            /** delete data event; parameter: collection of objects to delete */
            deleteDataEvent: Function;
            /** show data */
            showData(datas: bo.SalesReturn[]): void;
            /** get the selected data */
getSelecteds(): bo.SalesReturn[];
}
}
}
|
pp extends ibas.BO
|
detect.py
|
import os
import torch
from torch.utils.data import Dataset, random_split, DataLoader
from PIL import Image
import torchvision.models as models
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
# from sklearn.metrics import f1_score
import torch.nn.functional as F
import torch.nn as nn
from torchvision.utils import make_grid
from torchvision.datasets import ImageFolder
class ImageClassificationBase(nn.Module):
def training_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = accuracy(out, labels) # Calculate accuracy
return {"val_loss": loss.detach(), "val_acc": acc}
def validation_epoch_end(self, outputs):
batch_losses = [x["val_loss"] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x["val_acc"] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {"val_loss": epoch_loss.item(), "val_acc": epoch_acc.item()}
def epoch_end(self, epoch, result):
print(
"Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(
epoch, result["val_loss"], result["val_acc"]
)
)
def conv_block(in_channels, out_channels, pool=False):
layers = [
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
]
if pool:
layers.append(nn.MaxPool2d(2))
return nn.Sequential(*layers)
class ResNet9(ImageClassificationBase):
def __init__(self, in_channels, num_classes):
super().__init__()
self.conv1 = conv_block(in_channels, 64)
self.conv2 = conv_block(64, 128, pool=True) # 128*32
self.res1 = nn.Sequential(conv_block(128, 128), conv_block(128, 128))
self.conv3 = conv_block(128, 256, pool=True) # 256*16
self.conv4 = conv_block(256, 512, pool=True) # 512*8
self.res2 = nn.Sequential(conv_block(512, 512), conv_block(512, 512))
self.conv5 = conv_block(512, 1024, pool=True) # 1024*4
self.res3 = nn.Sequential(conv_block(1024, 1024), conv_block(1024, 1024))
self.classifier = nn.Sequential(
nn.MaxPool2d(4), nn.Flatten(), nn.Dropout(0.2), nn.Linear(1024, num_classes)
)
def forward(self, xb):
out = self.conv1(xb)
out = self.conv2(out)
out = self.res1(out) + out
out = self.conv3(out)
out = self.conv4(out)
out = self.res2(out) + out
out = self.conv5(out)
out = self.res3(out) + out
out = self.classifier(out)
return out
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device("cuda")
else:
return torch.device("cpu")
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader:
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
def preprocess(image):
transformations = transforms.Compose(
[transforms.Resize((64, 64)), transforms.ToTensor()]
)
image = transformations(image)
return image
class z:
"""Wrap a dataloader to move data to a device"""
classes = []
def __init__(self):
self.classes = ["COVID", "Lung_Opacity", "Normal", "Viral Pneumonia"]
def predict_image(img, model):
|
device = get_default_device()
# Convert to a batch of 1
xb = to_device(img.unsqueeze(0), device)
# xb = img.unsqueeze(0)
# Get predictions from model
yb = model(xb)
# Pick index with highest probability
prob, preds = torch.max(yb, dim=1)
print(preds)
dataset = z()
# Retrieve the class label
return dataset.classes[preds[0].item()]
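# Illustrative usage sketch (the checkpoint path "model.pth" and image path
# "sample.png" are hypothetical; assumes weights trained with this ResNet9 on
# the four classes listed in `z`):
if __name__ == "__main__":
    device = get_default_device()
    model = to_device(ResNet9(3, 4), device)
    model.load_state_dict(torch.load("model.pth", map_location=device))
    model.eval()
    img = preprocess(Image.open("sample.png").convert("RGB"))
    print(predict_image(img, model))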
|
|
test_subarray.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
|
import pytest
import numpy as np
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf import util
from asdf.tests import helpers
from ..subarray import Subarray
def test1(tmpdir, ret=False):
subarray = Subarray(offset=(100, 131), size=(256, 256), name='SA1')
tree = {'subarray': subarray}
if ret:
return subarray
helpers.assert_roundtrip_tree(tree, tmpdir)
| |
builtin_datatypes.rs
|
use serde::{Deserialize, Serialize};
use cdr_encoding_size::CdrEncodingSize;
use crate::{
dds::traits::key::Key,
structure::{guid::GUID, time::Timestamp},
};
/// Analog of DDS GUID in ROS2 builtin datastructures
#[derive(
Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize, CdrEncodingSize,
)]
pub struct Gid {
data: [u8; 24],
}
impl Gid {
pub fn from_guid(guid: GUID) -> Self {
let mut data: [u8; 24] = [0; 24];
data[..12].clone_from_slice(&guid.prefix.bytes);
data[12..15].clone_from_slice(&guid.entity_id.entity_key);
data[15..16].clone_from_slice(&[u8::from(guid.entity_id.entity_kind)]);
Self { data }
}
}
impl Key for Gid {}
/// Information about the node in ROS2 network
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct NodeInfo {
node_namespace: String,
node_name: String,
reader_guid: Vec<Gid>,
writer_guid: Vec<Gid>,
}
impl NodeInfo {
pub fn new(name: String, namespace: String) -> Self {
Self {
node_namespace: namespace,
node_name: name,
reader_guid: Vec::new(),
writer_guid: Vec::new(),
}
}
pub fn namespace(&self) -> &str
|
pub fn name(&self) -> &str {
&self.node_name
}
pub fn get_reader_gid(&self) -> Vec<Gid> {
self.reader_guid.clone()
}
pub fn get_writer_gid(&self) -> Vec<Gid> {
self.writer_guid.clone()
}
  /// Full name of the node: namespace + name, e.g. /some_node
pub fn get_full_name(&self) -> String {
let mut name = self.node_namespace.clone();
name.push_str(&self.node_name);
name
}
pub fn add_writer(&mut self, gid: Gid) {
if !self.writer_guid.contains(&gid) {
self.writer_guid.push(gid);
}
}
pub fn add_reader(&mut self, gid: Gid) {
if !self.reader_guid.contains(&gid) {
self.reader_guid.push(gid);
}
}
/// Clears all reader and writer guids
pub fn clear_all(&mut self) {
self.reader_guid.clear();
self.writer_guid.clear();
}
}
/// Information structure for other DomainParticipants in ROS2 network
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ROSParticipantInfo {
guid: Gid,
nodes: Vec<NodeInfo>,
}
impl ROSParticipantInfo {
pub fn new(guid: Gid, nodes: Vec<NodeInfo>) -> Self {
Self { guid, nodes }
}
pub fn guid(&self) -> Gid {
self.guid
}
pub fn into_nodes(self) -> Vec<NodeInfo> {
self.nodes
}
pub fn nodes(&self) -> &Vec<NodeInfo> {
&self.nodes
}
pub fn nodes_mut(&mut self) -> &mut Vec<NodeInfo> {
&mut self.nodes
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParameterEvents {
timestamp: Timestamp,
// fully qualified path
node: String,
new_parameters: Vec<Parameter>,
changed_parameters: Vec<Parameter>,
deleted_parameters: Vec<Parameter>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Parameter {
name: String,
value: ParameterValue,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParameterValue {
ptype: u8,
boolean_value: bool,
int_value: i64,
double_value: f64,
string_value: String,
byte_array: Vec<u8>,
bool_array: Vec<bool>,
int_array: Vec<i64>,
double_array: Vec<f64>,
string_array: Vec<String>,
}
/// Rosout message structure, received from RosParticipant rosout reader
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Log {
timestamp: Timestamp,
level: u8,
name: String,
msg: String,
file: String,
function: String,
line: u32,
}
impl Log {
/// Timestamp when rosout message was sent
pub fn get_timestamp(&self) -> &Timestamp {
&self.timestamp
}
/// Rosout level
pub fn get_level(&self) -> u8 {
self.level
}
/// Name of the rosout message
pub fn name(&self) -> &str {
&self.name
}
/// Actual message
pub fn get_msg(&self) -> &str {
&self.msg
}
pub fn get_file(&self) -> &str {
&self.file
}
pub fn get_function(&self) -> &str {
&self.function
}
pub fn get_line(&self) -> u32 {
self.line
}
}
|
{
&self.node_namespace
}
|
flag.py
|
import itertools
from multiprocessing import Manager
from pyaugmecon.options import Options
class Flag(object):
def __init__(self, opts: Options):
self.opts = opts
if self.opts.shared_flag:
self.flag = Manager().dict()
else:
self.flag = {}
def
|
(self, flag_range, value, iter):
indices = [tuple([n for n in flag_range(o)]) for o in iter]
iter = list(itertools.product(*indices))
tmp_flag = {}
for gp in iter:
tmp_flag[gp] = value
self.flag.update(tmp_flag)
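    # Illustrative note on the method above: with flag_range = range and
    # iter = [2, 2], indices is [(0, 1), (0, 1)] and itertools.product yields
    # the grid points (0,0), (0,1), (1,0), (1,1), each flagged with `value`.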
def get(self, i):
return self.flag.get(i, 0)
|
set
|
bulk_producto2.py
|
import pandas as pd
import glob
import re
df = []
for file in glob.glob("../output/producto2/*.csv"):
    date = re.search(r"\d{4}-\d{2}-\d{2}", file).group(0).replace("-", "/")
fragment = pd.read_csv(file)
fragment["Fecha"] = date
df.append(fragment)
df = pd.concat(df)
# Replace comuna names to match those published by SUBDERE
df["Comuna"] = df["Comuna"].replace({"Coyhaique": "Coihaique", "OHiggins": "O'Higgins"})
# Read comuna IDs from SUBDERE's official website
df_dim_comunas = pd.read_excel("http://www.subdere.gov.cl/sites/default/files/documentos/cut_2018_v03.xls", encoding="utf-8")
# Create an accent-free column to merge with the published data
df_dim_comunas["Comuna"] = df_dim_comunas["Nombre Comuna"].str.normalize("NFKD").str.encode("ascii", errors="ignore").str.decode("utf-8")
df = df.merge(df_dim_comunas, on="Comuna", how="outer")
|
"Nombre Región": "Region",
"Nombre Provincia": "Provincia",
"Nombre Comuna": "Comuna",
"Código Región": "Region ID",
"Código Provincia": "Provincia ID",
"Código Comuna 2017": "Comuna ID"
})
df["Casos Confirmados"] = df["Casos Confirmados"].fillna("-")
df["Tasa"] = df.apply(lambda x: (100000 * int(x["Casos Confirmados"]) / x["Poblacion"]) if x["Casos Confirmados"] != "-" else "-", axis=1)
# Write the data out as CSV / JSON
df.to_csv("../output/producto6/bulk/producto2.csv", index=False)
df.to_json("../output/producto6/bulk/producto2.json", orient="records")
|
df = df.drop(columns=["Comuna", "Region", "Codigo region", "Codigo comuna"])
df = df.rename(columns={
|
Aperture.js
|
// Scroll/navigation behavior, unpacked from the original p.a.c.k.e.d source; behavior unchanged.
window.addEventListener('load', function () {
    setTimeout(scrollTo, 0, 0, 1);
}, false);

$(window).bind('scroll', function () {
    // Pin the navbar and reveal the sub-logo once the page is scrolled past 140px
    if ($(window).scrollTop() > 140) {
        $('#nav').addClass('navbar-fixed');
        document.getElementById('sublogo').classList.add('visible');
        document.getElementById('sublogo').classList.remove('hidden');
    } else {
        $('#nav').removeClass('navbar-fixed');
        document.getElementById('sublogo').classList.remove('visible');
        document.getElementById('sublogo').classList.add('hidden');
    }
    // Underline the tab (t1..t6) matching the current scroll section;
    // active === 0 clears the underline from every tab.
    var underlineTab = function (active) {
        for (var i = 1; i <= 6; i++) {
            var tab = document.getElementById('t' + i);
            if (i === active) tab.classList.add('underline');
            else tab.classList.remove('underline');
        }
    };
    var top = $(window).scrollTop();
    if (top < 140) underlineTab(0);
    else if (top < 880) underlineTab(1);
    else if (top < 1650) underlineTab(2);
    else if (top < 2680) underlineTab(3);
    else if (top < 3220) underlineTab(4);
    else if (top < 3640) underlineTab(5);
    else underlineTab(6);
});
| ||
DrawerContainer.js
|
// Smart container component
import React, { Component } from 'react';
import { Image } from 'react-native';
import { NoHeaderLayout } from '../components/NoHeaderLayout';
import { connect } from 'react-redux';
import { DrawerTitle } from '../components/DrawerTitle';
// imports all action functions as an actions object
import * as actions from '../actions/drawer';
// responsible for the drawer view
class DrawerContainer extends Component {
constructor(props) {
super(props);
}
render() {
return (
<NoHeaderLayout>
{/*
loginStatus needs to be passed to the title to determine whether the sign in button should be displayed or not.
If the user has logged in then their first name will be displayed instead of the sign in option
*/}
|
}
}
// returns an object
const mapStateToProps = (appState, navigationState) => ({
navigation: navigationState.navigation,
screenProps: navigationState.screenProps,
navigatorStack: appState.navigatorStack,
// DrawerContainer container component needs to have access to the loginStatus attribute
// of the store so that it can display the Sign in option depending only on whether the user has logged in or not
loginStatus: appState.loginStatus
});
// returns an object
const mapDispatchToProps = dispatch => ({
});
export default connect(mapStateToProps, null)(DrawerContainer);
|
<DrawerTitle loginStatus = {this.props.loginStatus}/>
</NoHeaderLayout>
);
|
course.actions.ts
|
import { createAction, props } from "@ngrx/store";
import {Course} from './model/course';
export const loadAllCourses = createAction(
"[Courses Resolver] Load All Courses"
);
|
);
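// Illustrative effect sketch (hypothetical CoursesEffects class and
// coursesHttpService; not part of this file): a resolver dispatches
// loadAllCourses, an effect fetches the courses and dispatches
// allCoursesLoaded with the result.
//
//   loadCourses$ = createEffect(() => this.actions$.pipe(
//       ofType(loadAllCourses),
//       concatMap(() => this.coursesHttpService.findAllCourses()),
//       map(courses => allCoursesLoaded({courses}))
//   ));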
|
export const allCoursesLoaded = createAction(
"[Load Courses Effect] All Courses Loaded",
props<{courses: Course[]}>()
|
hostname.rs
|
#![crate_id(name="hostname", vers="1.0.0", author="Alan Andrade")]
/*
* This file is part of the uutils coreutils package.
*
* (c) Alan Andrade <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*
* Synced with:
*
* https://www.opensource.apple.com/source/shell_cmds/shell_cmds-170/hostname/hostname.c?txt
*/
extern crate getopts;
extern crate libc;
use std::{os,str};
use getopts::{optflag, getopts, usage};
extern {
fn gethostname(name: *libc::c_char, namelen: libc::size_t) -> libc::c_int;
fn sethostname(name: *libc::c_char, namelen: libc::c_int) -> libc::c_int;
}
#[allow(dead_code)]
fn main () { uumain(os::args()); }
pub fn uumain(args: Vec<String>) {
let program = args.get(0);
let options = [
optflag("f", "full", "Default option to show full name"),
|
];
let matches = match getopts(args.tail(), options) {
Ok(m) => { m }
_ => { help_menu(program.as_slice(), options); return; }
};
if matches.opt_present("h") {
help_menu(program.as_slice(), options);
return
}
if matches.opt_present("V") { version(); return }
match matches.free.len() {
0 => {
let hostname = xgethostname();
if matches.opt_present("s") {
let pos = hostname.as_slice().find_str(".");
if pos.is_some() {
println!("{:s}", hostname.as_slice().slice_to(pos.unwrap()));
return;
}
}
println!("{:s}", hostname.as_slice());
}
1 => { xsethostname( matches.free.last().unwrap().as_slice() ) }
_ => { help_menu(program.as_slice(), options); }
};
}
fn version() {
println!("hostname 1.0.0");
}
fn help_menu(program: &str, options: &[getopts::OptGroup]) {
version();
println!("");
println!("Usage:");
println!(" {:s} [OPTION]... [HOSTNAME]", program);
println!("");
print!("{:s}", usage("Print or set the system's host name.", options));
}
fn xgethostname() -> String {
let namelen = 256u;
let mut name = Vec::from_elem(namelen, 0u8);
let err = unsafe {
gethostname (name.as_mut_ptr() as *libc::c_char,
namelen as libc::size_t)
};
if err != 0 {
fail!("Cannot determine hostname");
}
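    // The gethostname buffer is NUL-terminated; keep only the bytes before the first 0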
let last_char = name.iter().position(|byte| *byte == 0).unwrap_or(namelen);
str::from_utf8(name.slice_to(last_char)).unwrap().to_string()
}
fn xsethostname(name: &str) {
let vec_name: Vec<libc::c_char> = name.bytes().map(|c| c as i8).collect();
let err = unsafe {
sethostname (vec_name.as_ptr(), vec_name.len() as i32)
};
if err != 0 {
println!("Cannot set hostname to {:s}", name);
}
}
|
optflag("s", "slice subdomain", "Cuts the subdomain off if any"),
optflag("h", "help", "Show help"),
optflag("V", "version", "Show program's version")
|
park_queue.rs
|
use crate::mutex::{Mutex, SpinLock};
use crate::queue::{Queue, TimeoutQueue, TryQueue};
use crate::{ThreadFunctions, ThreadParker, ThreadTimeoutParker, TimeFunctions};
use alloc::collections::VecDeque;
use alloc::sync::{Arc, Weak};
use core::sync::atomic::{AtomicBool, Ordering};
use core::time::Duration;
/// A [`ParkQueue`] that uses std functions.
#[cfg(feature = "std")]
pub type ParkQueueStd<T> = ParkQueue<T, crate::StdThreadFunctions>;
/// A queue based on [`VecDeque`]s and parking.
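///
/// ```ignore
/// // Illustrative usage (requires the `std` feature); a blocked `pop` wakes
/// // once another thread pushes a value.
/// let queue = ParkQueueStd::<u32>::default();
/// queue.push(7);
/// assert_eq!(queue.pop(), 7);
/// ```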
#[derive(Debug)]
pub struct ParkQueue<T, CS>
where
CS: ThreadParker,
{
inner: SpinLock<ParkQueueInner<T, CS>, CS>,
}
impl<T, CS> Default for ParkQueue<T, CS>
where
CS: ThreadParker,
{
fn default() -> Self {
Self {
inner: SpinLock::new(ParkQueueInner {
queue: Default::default(),
parkers: VecDeque::new(),
}),
}
}
}
impl<T, CS> TryQueue for ParkQueue<T, CS>
where
CS: ThreadParker + ThreadFunctions,
CS::ThreadId: Clone,
{
type Item = T;
fn try_push(&self, value: Self::Item) -> Result<(), Self::Item> {
let mut guard = self.inner.lock();
guard.queue.push_back(value);
while let Some(parker) = guard.parkers.pop_front() {
if let Some(parker) = parker.upgrade() {
parker.1.store(true, Ordering::Release);
CS::unpark(parker.0.clone());
break;
}
}
Ok(())
}
fn try_pop(&self) -> Option<Self::Item> {
self.inner.lock().queue.pop_front()
}
}
impl<T, CS> Queue for ParkQueue<T, CS>
where
CS: ThreadParker + ThreadFunctions,
CS::ThreadId: Clone,
{
fn push(&self, value: Self::Item) {
self.try_push(value)
.unwrap_or_else(|_| panic!("Try push should not fail!"));
}
fn pop(&self) -> Self::Item {
let mut guard = self.inner.lock();
if let Some(value) = guard.queue.pop_front() {
return value;
}
let self_swap = Arc::new((CS::current_thread(), AtomicBool::new(false)));
guard.parkers.push_back(Arc::downgrade(&self_swap));
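        // Park until woken. After each wake-up, re-take the lock and re-check the
        // queue; if another consumer already took the value, re-register as a parker.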
loop {
drop(guard);
CS::park();
guard = self.inner.lock();
if self_swap.1.load(Ordering::Acquire) {
if let Some(value) = guard.queue.pop_front() {
return value;
} else {
guard.parkers.push_front(Arc::downgrade(&self_swap));
}
}
}
}
}
impl<T, CS> TimeoutQueue for ParkQueue<T, CS>
where
CS: ThreadTimeoutParker + ThreadFunctions + TimeFunctions,
CS::ThreadId: Clone,
{
fn
|
(&self, value: Self::Item, _timeout: Duration) -> Result<(), Self::Item> {
self.try_push(value)
.unwrap_or_else(|_| panic!("Try push should not fail!"));
Ok(())
}
fn pop_timeout(&self, timeout: Duration) -> Option<Self::Item> {
let end = CS::current_time() + timeout;
let mut guard = self.inner.lock();
if let Some(value) = guard.queue.pop_front() {
return Some(value);
}
let self_swap = Arc::new((CS::current_thread(), AtomicBool::new(false)));
guard.parkers.push_back(Arc::downgrade(&self_swap));
loop {
drop(guard);
let current_time = CS::current_time();
if current_time < end {
CS::park_timeout(end - current_time);
}
guard = self.inner.lock();
if self_swap.1.load(Ordering::Acquire) {
if let Some(value) = guard.queue.pop_front() {
return Some(value);
} else if CS::current_time() >= end {
return None;
} else {
guard.parkers.push_front(Arc::downgrade(&self_swap));
}
}
if CS::current_time() >= end {
return None;
}
}
}
}
#[derive(Debug)]
struct ParkQueueInner<T, CS>
where
CS: ThreadParker,
{
queue: VecDeque<T>,
    /// Waiting consumers; a parker's `AtomicBool` is set to true when it should wake
parkers: VecDeque<Weak<(CS::ThreadId, AtomicBool)>>,
}
#[cfg(test)]
mod test {
use crate::queue::test::{queue_test, try_queue_test};
use crate::queue::ParkQueue;
#[cfg(feature = "std")]
use crate::StdThreadFunctions;
#[cfg(feature = "std")]
#[test]
fn function_test() {
try_queue_test(ParkQueue::<_, StdThreadFunctions>::default());
queue_test(ParkQueue::<_, StdThreadFunctions>::default());
}
}
|
push_timeout
|
__main__.py
|
import logging
import sys
from vax_common.vax_config import get_config
from vax_generator.vax_generator import VaxGenerator
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
def
|
():
generator = VaxGenerator(get_config())
generator.run()
if __name__ == "__main__":
main()
|
main
|
basic.py
|
#######################################
# IMPORTS
#######################################
from strings_with_arrows import *
import string
#######################################
# CONSTANTS
#######################################
DIGITS = '0123456789'
LETTERS = string.ascii_letters
LETTERS_DIGITS = LETTERS + DIGITS
#######################################
# ERRORS
#######################################
class Error:
def __init__(self, pos_start, pos_end, error_name, details):
self.pos_start = pos_start
self.pos_end = pos_end
self.error_name = error_name
self.details = details
def as_string(self):
result = f'{self.error_name}: {self.details}\n'
result += f'File {self.pos_start.fn}, line {self.pos_start.ln + 1}'
result += '\n\n' + string_with_arrows(self.pos_start.ftxt, self.pos_start, self.pos_end)
return result
class IllegalCharError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__(pos_start, pos_end, 'Illegal Character', details)
class ExpectedCharError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__(pos_start, pos_end, 'Expected Character', details)
class InvalidSyntaxError(Error):
def __init__(self, pos_start, pos_end, details=''):
super().__init__(pos_start, pos_end, 'Invalid Syntax', details)
class RTError(Error):
def __init__(self, pos_start, pos_end, details, context):
super().__init__(pos_start, pos_end, 'Runtime Error', details)
self.context = context
def as_string(self):
result = self.generate_traceback()
result += f'{self.error_name}: {self.details}'
result += '\n\n' + string_with_arrows(self.pos_start.ftxt, self.pos_start, self.pos_end)
return result
def generate_traceback(self):
result = ''
pos = self.pos_start
ctx = self.context
while ctx:
result = f' File {pos.fn}, line {str(pos.ln + 1)}, in {ctx.display_name}\n' + result
pos = ctx.parent_entry_pos
ctx = ctx.parent
return 'Traceback (most recent call last):\n' + result
#######################################
# POSITION
#######################################
class Position:
def __init__(self, idx, ln, col, fn, ftxt):
self.idx = idx
self.ln = ln
self.col = col
self.fn = fn
self.ftxt = ftxt
def advance(self, current_char=None):
self.idx += 1
self.col += 1
if current_char == '\n':
self.ln += 1
self.col = 0
return self
def copy(self):
return Position(self.idx, self.ln, self.col, self.fn, self.ftxt)
#######################################
# TOKENS
#######################################
TT_INT = 'INT'
TT_FLOAT = 'FLOAT'
TT_IDENTIFIER = 'IDENTIFIER'
TT_KEYWORD = 'KEYWORD'
TT_PLUS = 'PLUS'
TT_MINUS = 'MINUS'
TT_MUL = 'MUL'
TT_DIV = 'DIV'
TT_POW = 'POW'
TT_EQ = 'EQ'
TT_LPAREN = 'LPAREN'
TT_RPAREN = 'RPAREN'
TT_EE = 'EE'
TT_NE = 'NE'
TT_LT = 'LT'
TT_GT = 'GT'
TT_LTE = 'LTE'
TT_GTE = 'GTE'
TT_COMMA = 'COMMA'
TT_ARROW = 'ARROW'
TT_EOF = 'EOF'
KEYWORDS = [
'VAR',
'AND',
'OR',
'NOT',
'IF',
'ELIF',
'ELSE',
'FOR',
'TO',
'STEP',
'WHILE',
'FUN',
'THEN'
]
class Token:
def __init__(self, type_, value=None, pos_start=None, pos_end=None):
self.type = type_
self.value = value
if pos_start:
self.pos_start = pos_start.copy()
self.pos_end = pos_start.copy()
self.pos_end.advance()
if pos_end:
self.pos_end = pos_end.copy()
def matches(self, type_, value):
return self.type == type_ and self.value == value
def __repr__(self):
if self.value: return f'{self.type}:{self.value}'
return f'{self.type}'
#######################################
# LEXER
#######################################
class Lexer:
def __init__(self, fn, text):
self.fn = fn
self.text = text
self.pos = Position(-1, 0, -1, fn, text)
self.current_char = None
self.advance()
def advance(self):
self.pos.advance(self.current_char)
self.current_char = self.text[self.pos.idx] if self.pos.idx < len(self.text) else None
def make_tokens(self):
tokens = []
while self.current_char != None:
if self.current_char in ' \t':
self.advance()
elif self.current_char in DIGITS:
tokens.append(self.make_number())
elif self.current_char in LETTERS:
tokens.append(self.make_identifier())
elif self.current_char == '+':
tokens.append(Token(TT_PLUS, pos_start=self.pos))
self.advance()
elif self.current_char == '-':
tokens.append(self.make_minus_or_arrow())
elif self.current_char == '*':
tokens.append(Token(TT_MUL, pos_start=self.pos))
self.advance()
elif self.current_char == '/':
tokens.append(Token(TT_DIV, pos_start=self.pos))
self.advance()
elif self.current_char == '^':
tokens.append(Token(TT_POW, pos_start=self.pos))
self.advance()
elif self.current_char == '(':
tokens.append(Token(TT_LPAREN, pos_start=self.pos))
self.advance()
elif self.current_char == ')':
tokens.append(Token(TT_RPAREN, pos_start=self.pos))
self.advance()
elif self.current_char == '!':
token, error = self.make_not_equals()
if error: return [], error
tokens.append(token)
elif self.current_char == '=':
tokens.append(self.make_equals())
elif self.current_char == '<':
tokens.append(self.make_less_than())
elif self.current_char == '>':
tokens.append(self.make_greater_than())
elif self.current_char == ',':
tokens.append(Token(TT_COMMA, pos_start=self.pos))
self.advance()
else:
pos_start = self.pos.copy()
char = self.current_char
self.advance()
return [], IllegalCharError(pos_start, self.pos, "'" + char + "'")
tokens.append(Token(TT_EOF, pos_start=self.pos))
return tokens, None
def make_number(self):
|
num_str = ''
dot_count = 0
pos_start = self.pos.copy()
while self.current_char != None and self.current_char in DIGITS + '.':
if self.current_char == '.':
if dot_count == 1: break
dot_count += 1
num_str += self.current_char
self.advance()
if dot_count == 0:
return Token(TT_INT, int(num_str), pos_start, self.pos)
else:
return Token(TT_FLOAT, float(num_str), pos_start, self.pos)
def make_identifier(self):
id_str = ''
pos_start = self.pos.copy()
while self.current_char != None and self.current_char in LETTERS_DIGITS + '_':
id_str += self.current_char
self.advance()
tok_type = TT_KEYWORD if id_str in KEYWORDS else TT_IDENTIFIER
return Token(tok_type, id_str, pos_start, self.pos)
def make_minus_or_arrow(self):
tok_type = TT_MINUS
pos_start = self.pos.copy()
self.advance()
if self.current_char == '>':
self.advance()
tok_type = TT_ARROW
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
def make_not_equals(self):
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
return Token(TT_NE, pos_start=pos_start, pos_end=self.pos), None
self.advance()
return None, ExpectedCharError(pos_start, self.pos, "'=' (after '!')")
def make_equals(self):
tok_type = TT_EQ
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
tok_type = TT_EE
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
def make_less_than(self):
tok_type = TT_LT
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
tok_type = TT_LTE
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
def make_greater_than(self):
tok_type = TT_GT
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
tok_type = TT_GTE
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
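# Illustrative helper (not part of the original tutorial): shows the token
# stream the Lexer produces for a small expression.
def _demo_lexer():
	tokens, error = Lexer('<stdin>', 'VAR x = 1 + 2 * 3').make_tokens()
	if error:
		print(error.as_string())
	else:
		# [KEYWORD:VAR, IDENTIFIER:x, EQ, INT:1, PLUS, INT:2, MUL, INT:3, EOF]
		print(tokens)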
#######################################
# NODES
#######################################
class NumberNode:
def __init__(self, tok):
self.tok = tok
self.pos_start = self.tok.pos_start
self.pos_end = self.tok.pos_end
def __repr__(self):
return f'{self.tok}'
class VarAccessNode:
def __init__(self, var_name_tok):
self.var_name_tok = var_name_tok
self.pos_start = self.var_name_tok.pos_start
self.pos_end = self.var_name_tok.pos_end
class VarAssignNode:
def __init__(self, var_name_tok, value_node):
self.var_name_tok = var_name_tok
self.value_node = value_node
self.pos_start = self.var_name_tok.pos_start
self.pos_end = self.value_node.pos_end
class BinOpNode:
def __init__(self, left_node, op_tok, right_node):
self.left_node = left_node
self.op_tok = op_tok
self.right_node = right_node
self.pos_start = self.left_node.pos_start
self.pos_end = self.right_node.pos_end
def __repr__(self):
return f'({self.left_node}, {self.op_tok}, {self.right_node})'
class UnaryOpNode:
def __init__(self, op_tok, node):
self.op_tok = op_tok
self.node = node
self.pos_start = self.op_tok.pos_start
self.pos_end = node.pos_end
def __repr__(self):
return f'({self.op_tok}, {self.node})'
class IfNode:
def __init__(self, cases, else_case):
self.cases = cases
self.else_case = else_case
self.pos_start = self.cases[0][0].pos_start
self.pos_end = (self.else_case or self.cases[len(self.cases) - 1][0]).pos_end
class ForNode:
def __init__(self, var_name_tok, start_value_node, end_value_node, step_value_node, body_node):
self.var_name_tok = var_name_tok
self.start_value_node = start_value_node
self.end_value_node = end_value_node
self.step_value_node = step_value_node
self.body_node = body_node
self.pos_start = self.var_name_tok.pos_start
self.pos_end = self.body_node.pos_end
class WhileNode:
def __init__(self, condition_node, body_node):
self.condition_node = condition_node
self.body_node = body_node
self.pos_start = self.condition_node.pos_start
self.pos_end = self.body_node.pos_end
class FuncDefNode:
def __init__(self, var_name_tok, arg_name_toks, body_node):
self.var_name_tok = var_name_tok
self.arg_name_toks = arg_name_toks
self.body_node = body_node
if self.var_name_tok:
self.pos_start = self.var_name_tok.pos_start
elif len(self.arg_name_toks) > 0:
self.pos_start = self.arg_name_toks[0].pos_start
else:
self.pos_start = self.body_node.pos_start
self.pos_end = self.body_node.pos_end
class CallNode:
def __init__(self, node_to_call, arg_nodes):
self.node_to_call = node_to_call
self.arg_nodes = arg_nodes
self.pos_start = self.node_to_call.pos_start
if len(self.arg_nodes) > 0:
self.pos_end = self.arg_nodes[len(self.arg_nodes) - 1].pos_end
else:
self.pos_end = self.node_to_call.pos_end
#######################################
# PARSE RESULT
#######################################
class ParseResult:
def __init__(self):
self.error = None
self.node = None
self.last_registered_advance_count = 0
self.advance_count = 0
def register_advancement(self):
self.last_registered_advance_count = 1
self.advance_count += 1
def register(self, res):
self.last_registered_advance_count = res.advance_count
self.advance_count += res.advance_count
if res.error: self.error = res.error
return res.node
def success(self, node):
self.node = node
return self
def failure(self, error):
if not self.error or self.last_registered_advance_count == 0:
self.error = error
return self
#######################################
# PARSER
#######################################
class Parser:
def __init__(self, tokens):
self.tokens = tokens
self.tok_idx = -1
self.advance()
    def advance(self):
self.tok_idx += 1
if self.tok_idx < len(self.tokens):
self.current_tok = self.tokens[self.tok_idx]
return self.current_tok
def parse(self):
res = self.expr()
if not res.error and self.current_tok.type != TT_EOF:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '+', '-', '*', '/', '^', '==', '!=', '<', '>', <=', '>=', 'AND' or 'OR'"
))
return res
###################################
def expr(self):
res = ParseResult()
if self.current_tok.matches(TT_KEYWORD, 'VAR'):
res.register_advancement()
self.advance()
if self.current_tok.type != TT_IDENTIFIER:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected identifier"
))
var_name = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != TT_EQ:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '='"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
return res.success(VarAssignNode(var_name, expr))
node = res.register(self.bin_op(self.comp_expr, ((TT_KEYWORD, 'AND'), (TT_KEYWORD, 'OR'))))
if res.error:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected 'VAR', 'IF', 'FOR', 'WHILE', 'FUN', int, float, identifier, '+', '-', '(' or 'NOT'"
))
return res.success(node)
def comp_expr(self):
res = ParseResult()
if self.current_tok.matches(TT_KEYWORD, 'NOT'):
op_tok = self.current_tok
res.register_advancement()
self.advance()
node = res.register(self.comp_expr())
if res.error: return res
return res.success(UnaryOpNode(op_tok, node))
node = res.register(self.bin_op(self.arith_expr, (TT_EE, TT_NE, TT_LT, TT_GT, TT_LTE, TT_GTE)))
if res.error:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected int, float, identifier, '+', '-', '(' or 'NOT'"
))
return res.success(node)
def arith_expr(self):
return self.bin_op(self.term, (TT_PLUS, TT_MINUS))
def term(self):
return self.bin_op(self.factor, (TT_MUL, TT_DIV))
def factor(self):
res = ParseResult()
tok = self.current_tok
if tok.type in (TT_PLUS, TT_MINUS):
res.register_advancement()
self.advance()
factor = res.register(self.factor())
if res.error: return res
return res.success(UnaryOpNode(tok, factor))
return self.power()
def power(self):
return self.bin_op(self.call, (TT_POW, ), self.factor)
def call(self):
res = ParseResult()
atom = res.register(self.atom())
if res.error: return res
if self.current_tok.type == TT_LPAREN:
res.register_advancement()
self.advance()
arg_nodes = []
if self.current_tok.type == TT_RPAREN:
res.register_advancement()
self.advance()
else:
arg_nodes.append(res.register(self.expr()))
if res.error:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ')', 'VAR', 'IF', 'FOR', 'WHILE', 'FUN', int, float, identifier, '+', '-', '(' or 'NOT'"
))
while self.current_tok.type == TT_COMMA:
res.register_advancement()
self.advance()
arg_nodes.append(res.register(self.expr()))
if res.error: return res
if self.current_tok.type != TT_RPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected ',' or ')'"
))
res.register_advancement()
self.advance()
return res.success(CallNode(atom, arg_nodes))
return res.success(atom)
def atom(self):
res = ParseResult()
tok = self.current_tok
if tok.type in (TT_INT, TT_FLOAT):
res.register_advancement()
self.advance()
return res.success(NumberNode(tok))
elif tok.type == TT_IDENTIFIER:
res.register_advancement()
self.advance()
return res.success(VarAccessNode(tok))
elif tok.type == TT_LPAREN:
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
if self.current_tok.type == TT_RPAREN:
res.register_advancement()
self.advance()
return res.success(expr)
else:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ')'"
))
elif tok.matches(TT_KEYWORD, 'IF'):
if_expr = res.register(self.if_expr())
if res.error: return res
return res.success(if_expr)
elif tok.matches(TT_KEYWORD, 'FOR'):
for_expr = res.register(self.for_expr())
if res.error: return res
return res.success(for_expr)
elif tok.matches(TT_KEYWORD, 'WHILE'):
while_expr = res.register(self.while_expr())
if res.error: return res
return res.success(while_expr)
elif tok.matches(TT_KEYWORD, 'FUN'):
func_def = res.register(self.func_def())
if res.error: return res
return res.success(func_def)
return res.failure(InvalidSyntaxError(
tok.pos_start, tok.pos_end,
"Expected int, float, identifier, '+', '-', '(', 'IF', 'FOR', 'WHILE', 'FUN'"
))
def if_expr(self):
res = ParseResult()
cases = []
else_case = None
if not self.current_tok.matches(TT_KEYWORD, 'IF'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'IF'"
))
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
cases.append((condition, expr))
while self.current_tok.matches(TT_KEYWORD, 'ELIF'):
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
cases.append((condition, expr))
if self.current_tok.matches(TT_KEYWORD, 'ELSE'):
res.register_advancement()
self.advance()
else_case = res.register(self.expr())
if res.error: return res
return res.success(IfNode(cases, else_case))
def for_expr(self):
res = ParseResult()
if not self.current_tok.matches(TT_KEYWORD, 'FOR'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'FOR'"
))
res.register_advancement()
self.advance()
if self.current_tok.type != TT_IDENTIFIER:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier"
))
var_name = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != TT_EQ:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '='"
))
res.register_advancement()
self.advance()
start_value = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'TO'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'TO'"
))
res.register_advancement()
self.advance()
end_value = res.register(self.expr())
if res.error: return res
if self.current_tok.matches(TT_KEYWORD, 'STEP'):
res.register_advancement()
self.advance()
step_value = res.register(self.expr())
if res.error: return res
else:
step_value = None
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
body = res.register(self.expr())
if res.error: return res
return res.success(ForNode(var_name, start_value, end_value, step_value, body))
def while_expr(self):
res = ParseResult()
if not self.current_tok.matches(TT_KEYWORD, 'WHILE'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'WHILE'"
))
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
body = res.register(self.expr())
if res.error: return res
return res.success(WhileNode(condition, body))
def func_def(self):
res = ParseResult()
if not self.current_tok.matches(TT_KEYWORD, 'FUN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'FUN'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == TT_IDENTIFIER:
var_name_tok = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != TT_LPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '('"
))
else:
var_name_tok = None
if self.current_tok.type != TT_LPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier or '('"
))
res.register_advancement()
self.advance()
arg_name_toks = []
if self.current_tok.type == TT_IDENTIFIER:
arg_name_toks.append(self.current_tok)
res.register_advancement()
self.advance()
while self.current_tok.type == TT_COMMA:
res.register_advancement()
self.advance()
if self.current_tok.type != TT_IDENTIFIER:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier"
))
arg_name_toks.append(self.current_tok)
res.register_advancement()
self.advance()
if self.current_tok.type != TT_RPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected ',' or ')'"
))
else:
if self.current_tok.type != TT_RPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier or ')'"
))
res.register_advancement()
self.advance()
if self.current_tok.type != TT_ARROW:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '->'"
))
res.register_advancement()
self.advance()
node_to_return = res.register(self.expr())
if res.error: return res
return res.success(FuncDefNode(
var_name_tok,
arg_name_toks,
node_to_return
))
###################################
def bin_op(self, func_a, ops, func_b=None):
        if func_b is None:
            func_b = func_a
res = ParseResult()
left = res.register(func_a())
if res.error: return res
while self.current_tok.type in ops or (self.current_tok.type, self.current_tok.value) in ops:
op_tok = self.current_tok
res.register_advancement()
self.advance()
right = res.register(func_b())
if res.error: return res
left = BinOpNode(left, op_tok, right)
return res.success(left)
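    # Example of what bin_op builds (a sketch using the names defined above):
    # parsing "1 - 2 - 3" with func_a=self.term and ops=(TT_PLUS, TT_MINUS)
    # loops twice and yields the left-associative tree
    # BinOpNode(BinOpNode(1, MINUS, 2), MINUS, 3), i.e. (1 - 2) - 3.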
#######################################
# RUNTIME RESULT
#######################################
class RTResult:
def __init__(self):
self.value = None
self.error = None
def register(self, res):
self.error = res.error
return res.value
def success(self, value):
self.value = value
return self
def failure(self, error):
self.error = error
return self
#######################################
# VALUES
#######################################
class Value:
def __init__(self):
self.set_pos()
self.set_context()
def set_pos(self, pos_start=None, pos_end=None):
self.pos_start = pos_start
self.pos_end = pos_end
return self
def set_context(self, context=None):
self.context = context
return self
def added_to(self, other):
return None, self.illegal_operation(other)
def subbed_by(self, other):
return None, self.illegal_operation(other)
def multed_by(self, other):
return None, self.illegal_operation(other)
def dived_by(self, other):
return None, self.illegal_operation(other)
def powed_by(self, other):
return None, self.illegal_operation(other)
def get_comparison_eq(self, other):
return None, self.illegal_operation(other)
def get_comparison_ne(self, other):
return None, self.illegal_operation(other)
def get_comparison_lt(self, other):
return None, self.illegal_operation(other)
def get_comparison_gt(self, other):
return None, self.illegal_operation(other)
def get_comparison_lte(self, other):
return None, self.illegal_operation(other)
def get_comparison_gte(self, other):
return None, self.illegal_operation(other)
def anded_by(self, other):
return None, self.illegal_operation(other)
def ored_by(self, other):
return None, self.illegal_operation(other)
def notted(self):
        return None, self.illegal_operation()
def execute(self, args):
return RTResult().failure(self.illegal_operation())
def copy(self):
raise Exception('No copy method defined')
def is_true(self):
return False
def illegal_operation(self, other=None):
if not other: other = self
return RTError(
self.pos_start, other.pos_end,
'Illegal operation',
self.context
)
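# Convention used by Value and its subclasses below: every operation returns a
# (result, error) pair with exactly one side set, e.g. (sketch)
#   Number(6).dived_by(Number(2))  ->  (Number(3.0), None)
#   Number(6).dived_by(Number(0))  ->  (None, RTError(...'Division by zero'...))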
class Number(Value):
def __init__(self, value):
super().__init__()
self.value = value
def added_to(self, other):
if isinstance(other, Number):
return Number(self.value + other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def subbed_by(self, other):
if isinstance(other, Number):
return Number(self.value - other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def multed_by(self, other):
if isinstance(other, Number):
return Number(self.value * other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def dived_by(self, other):
if isinstance(other, Number):
if other.value == 0:
return None, RTError(
other.pos_start, other.pos_end,
'Division by zero',
self.context
)
return Number(self.value / other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def powed_by(self, other):
if isinstance(other, Number):
return Number(self.value ** other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_eq(self, other):
if isinstance(other, Number):
return Number(int(self.value == other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_ne(self, other):
if isinstance(other, Number):
return Number(int(self.value != other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lt(self, other):
if isinstance(other, Number):
return Number(int(self.value < other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gt(self, other):
if isinstance(other, Number):
return Number(int(self.value > other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lte(self, other):
if isinstance(other, Number):
return Number(int(self.value <= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gte(self, other):
if isinstance(other, Number):
return Number(int(self.value >= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def anded_by(self, other):
if isinstance(other, Number):
return Number(int(self.value and other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def ored_by(self, other):
if isinstance(other, Number):
return Number(int(self.value or other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def notted(self):
return Number(1 if self.value == 0 else 0).set_context(self.context), None
def copy(self):
copy = Number(self.value)
copy.set_pos(self.pos_start, self.pos_end)
copy.set_context(self.context)
return copy
def is_true(self):
return self.value != 0
def __repr__(self):
return str(self.value)
class Function(Value):
def __init__(self, name, body_node, arg_names):
super().__init__()
self.name = name or "<anonymous>"
self.body_node = body_node
self.arg_names = arg_names
def execute(self, args):
res = RTResult()
interpreter = Interpreter()
new_context = Context(self.name, self.context, self.pos_start)
new_context.symbol_table = SymbolTable(new_context.parent.symbol_table)
if len(args) > len(self.arg_names):
return res.failure(RTError(
self.pos_start, self.pos_end,
f"{len(args) - len(self.arg_names)} too many args passed into '{self.name}'",
self.context
))
if len(args) < len(self.arg_names):
return res.failure(RTError(
self.pos_start, self.pos_end,
f"{len(self.arg_names) - len(args)} too few args passed into '{self.name}'",
self.context
))
for i in range(len(args)):
arg_name = self.arg_names[i]
arg_value = args[i]
arg_value.set_context(new_context)
new_context.symbol_table.set(arg_name, arg_value)
value = res.register(interpreter.visit(self.body_node, new_context))
if res.error: return res
return res.success(value)
def copy(self):
copy = Function(self.name, self.body_node, self.arg_names)
copy.set_context(self.context)
copy.set_pos(self.pos_start, self.pos_end)
return copy
def __repr__(self):
return f"<function {self.name}>"
#######################################
# CONTEXT
#######################################
class Context:
def __init__(self, display_name, parent=None, parent_entry_pos=None):
self.display_name = display_name
self.parent = parent
self.parent_entry_pos = parent_entry_pos
self.symbol_table = None
#######################################
# SYMBOL TABLE
#######################################
class SymbolTable:
def __init__(self, parent=None):
self.symbols = {}
self.parent = parent
def get(self, name):
value = self.symbols.get(name, None)
        if value is None and self.parent:
return self.parent.get(name)
return value
def set(self, name, value):
self.symbols[name] = value
def remove(self, name):
del self.symbols[name]
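# Lookup chains through parent scopes: with child = SymbolTable(parent=global_symbol_table),
# child.get('TRUE') falls back to the parent table when the name is not set
# locally, while child.set(...) always writes to the innermost table (shadowing).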
#######################################
# INTERPRETER
#######################################
class Interpreter:
def visit(self, node, context):
method_name = f'visit_{type(node).__name__}'
method = getattr(self, method_name, self.no_visit_method)
return method(node, context)
def no_visit_method(self, node, context):
raise Exception(f'No visit_{type(node).__name__} method defined')
###################################
def visit_NumberNode(self, node, context):
return RTResult().success(
Number(node.tok.value).set_context(context).set_pos(node.pos_start, node.pos_end)
)
def visit_VarAccessNode(self, node, context):
res = RTResult()
var_name = node.var_name_tok.value
value = context.symbol_table.get(var_name)
if not value:
return res.failure(RTError(
node.pos_start, node.pos_end,
f"'{var_name}' is not defined",
context
))
value = value.copy().set_pos(node.pos_start, node.pos_end)
return res.success(value)
def visit_VarAssignNode(self, node, context):
res = RTResult()
var_name = node.var_name_tok.value
value = res.register(self.visit(node.value_node, context))
if res.error: return res
context.symbol_table.set(var_name, value)
return res.success(value)
def visit_BinOpNode(self, node, context):
res = RTResult()
left = res.register(self.visit(node.left_node, context))
if res.error: return res
right = res.register(self.visit(node.right_node, context))
if res.error: return res
if node.op_tok.type == TT_PLUS:
result, error = left.added_to(right)
elif node.op_tok.type == TT_MINUS:
result, error = left.subbed_by(right)
elif node.op_tok.type == TT_MUL:
result, error = left.multed_by(right)
elif node.op_tok.type == TT_DIV:
result, error = left.dived_by(right)
elif node.op_tok.type == TT_POW:
result, error = left.powed_by(right)
elif node.op_tok.type == TT_EE:
result, error = left.get_comparison_eq(right)
elif node.op_tok.type == TT_NE:
result, error = left.get_comparison_ne(right)
elif node.op_tok.type == TT_LT:
result, error = left.get_comparison_lt(right)
elif node.op_tok.type == TT_GT:
result, error = left.get_comparison_gt(right)
elif node.op_tok.type == TT_LTE:
result, error = left.get_comparison_lte(right)
elif node.op_tok.type == TT_GTE:
result, error = left.get_comparison_gte(right)
elif node.op_tok.matches(TT_KEYWORD, 'AND'):
result, error = left.anded_by(right)
elif node.op_tok.matches(TT_KEYWORD, 'OR'):
result, error = left.ored_by(right)
if error:
return res.failure(error)
else:
return res.success(result.set_pos(node.pos_start, node.pos_end))
def visit_UnaryOpNode(self, node, context):
res = RTResult()
number = res.register(self.visit(node.node, context))
if res.error: return res
error = None
if node.op_tok.type == TT_MINUS:
number, error = number.multed_by(Number(-1))
elif node.op_tok.matches(TT_KEYWORD, 'NOT'):
number, error = number.notted()
if error:
return res.failure(error)
else:
return res.success(number.set_pos(node.pos_start, node.pos_end))
def visit_IfNode(self, node, context):
res = RTResult()
for condition, expr in node.cases:
condition_value = res.register(self.visit(condition, context))
if res.error: return res
if condition_value.is_true():
expr_value = res.register(self.visit(expr, context))
if res.error: return res
return res.success(expr_value)
if node.else_case:
else_value = res.register(self.visit(node.else_case, context))
if res.error: return res
return res.success(else_value)
return res.success(None)
def visit_ForNode(self, node, context):
res = RTResult()
start_value = res.register(self.visit(node.start_value_node, context))
if res.error: return res
end_value = res.register(self.visit(node.end_value_node, context))
if res.error: return res
if node.step_value_node:
step_value = res.register(self.visit(node.step_value_node, context))
if res.error: return res
else:
step_value = Number(1)
i = start_value.value
if step_value.value >= 0:
condition = lambda: i < end_value.value
else:
condition = lambda: i > end_value.value
while condition():
context.symbol_table.set(node.var_name_tok.value, Number(i))
i += step_value.value
res.register(self.visit(node.body_node, context))
if res.error: return res
return res.success(None)
def visit_WhileNode(self, node, context):
res = RTResult()
while True:
condition = res.register(self.visit(node.condition_node, context))
if res.error: return res
if not condition.is_true(): break
res.register(self.visit(node.body_node, context))
if res.error: return res
return res.success(None)
def visit_FuncDefNode(self, node, context):
res = RTResult()
func_name = node.var_name_tok.value if node.var_name_tok else None
body_node = node.body_node
arg_names = [arg_name.value for arg_name in node.arg_name_toks]
func_value = Function(func_name, body_node, arg_names).set_context(context).set_pos(node.pos_start, node.pos_end)
if node.var_name_tok:
context.symbol_table.set(func_name, func_value)
return res.success(func_value)
def visit_CallNode(self, node, context):
res = RTResult()
args = []
value_to_call = res.register(self.visit(node.node_to_call, context))
if res.error: return res
value_to_call = value_to_call.copy().set_pos(node.pos_start, node.pos_end)
for arg_node in node.arg_nodes:
args.append(res.register(self.visit(arg_node, context)))
if res.error: return res
return_value = res.register(value_to_call.execute(args))
if res.error: return res
return res.success(return_value)
#######################################
# RUN
#######################################
global_symbol_table = SymbolTable()
global_symbol_table.set("NULL", Number(0))
global_symbol_table.set("FALSE", Number(0))
global_symbol_table.set("TRUE", Number(1))
def run(fn, text):
# Generate tokens
lexer = Lexer(fn, text)
tokens, error = lexer.make_tokens()
if error: return None, error
# Generate AST
parser = Parser(tokens)
ast = parser.parse()
if ast.error: return None, ast.error
# Run program
interpreter = Interpreter()
context = Context('<program>')
context.symbol_table = global_symbol_table
result = interpreter.visit(ast.node, context)
return result.value, result.error
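# Usage sketch (not part of the original file). Assumes the Error classes
# defined earlier in this module expose as_string(), as the RTError usage
# above suggests.
if __name__ == '__main__':
    value, error = run('<stdin>', 'VAR x = 5 + 3 * 2')
    if error:
        print(error.as_string())
    else:
        print(value)  # expected output: 11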
| |
processor.py
|
import os.path
import typing
import subprocess
import base64
from django.conf import settings
from django.core.files.storage import default_storage
from django.http import HttpResponse
from django.urls.base import resolve
from django.views.decorators.csrf import csrf_exempt
from drf_yasg.openapi import Parameter
from drf_yasg.utils import swagger_auto_schema
from rest_framework.filters import BaseFilterBackend
from rest_framework.response import Response
from rest_framework.schemas import coreapi
from rest_framework.views import APIView, Request
from rest_framework import status
from rest_framework.exceptions import ValidationError
from ..drf_auth_override import CsrfExemptSessionAuthentication
from ..utils import xresponse, get_pretty_logger, file_hash, ErrorCode, source_hash, encode_base64
from ..exceptions import ParamError
from ..serializers import ImgSerializer
from ..views import schema_utils
logger = get_pretty_logger('api:views')
class RequestImgFilterBackend(BaseFilterBackend):
def get_schema_fields(self, view):
        return []
def validate_payload(serializer_class, payload: dict) -> dict:
img_serializer = serializer_class(data=payload)
img_serializer.is_valid(raise_exception=True)
clean_data = img_serializer.validated_data
    # the join already drops the dots, so only spaces need flattening here
    name = ''.join(clean_data['source'].name.split('.')[:-1]).replace(' ', '_')
    suffix = ''.join(clean_data['source'].name.split('.')[-1:])
filename = default_storage.save(f'{name}.{suffix}', clean_data['source'])
clean_data['filename'] = filename
clean_data['storage'] = default_storage.location
return clean_data
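# Called from ImgProcessAPIView.post() below, e.g. (sketch)
#   clean_data = validate_payload(ImgSerializer, {'source': <uploaded file>})
# which returns the serializer's validated_data plus 'filename' and 'storage'
# keys pointing at the copy saved via default_storage.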
class ImgProcessAPIView(APIView):
filter_backends = (RequestImgFilterBackend,)
serializer_class = ImgSerializer
authentication_classes = (CsrfExemptSessionAuthentication,)
def process_request(self, clean_data, request):
        raise NotImplementedError('subclasses must implement process_request()')
@property
def return_format(self):
return ''
@swagger_auto_schema(operation_description="",
manual_parameters=[Parameter('output', in_='query', required=True, type='string')],
request_body=serializer_class,
responses={200: schema_utils.xresponse_ok(),
400: schema_utils.xresponse_nok()})
def post(self, request):
if 'output' not in request.query_params:
output = 'image'
else:
output = str(request.query_params['output']).lower()
supported_output_formats = ['image', 'url']
if output not in supported_output_formats:
return xresponse(
status=status.HTTP_400_BAD_REQUEST,
error_code=ErrorCode.InvalidParams,
msg=f'Unhandled output format. Selected: {output} available: [{", ".join(supported_output_formats)}]'
)
try:
clean_data = validate_payload(self.serializer_class, request.data)
except ParamError as e:
return xresponse(status.HTTP_400_BAD_REQUEST, e.error_code, e.msg)
try:
output_filepath, output_filename = self.process_request(clean_data, request)
if output == 'image':
with open(output_filepath, 'rb') as file:
return HttpResponse(content=file.read(), content_type=f'image/{self.return_format}')
else:
return HttpResponse(
status=status.HTTP_303_SEE_OTHER,
headers={
'Location': request.build_absolute_uri(f'{settings.MEDIA_URL}{output_filename}')
},
)
except Exception as e:
            return xresponse(status.HTTP_400_BAD_REQUEST, ErrorCode.NotFound, str(e))
class Png2Tiff(ImgProcessAPIView):
@property
def return_format(self):
return 'tiff'
def process_request(self, clean_data, request):
        # Pipeline (ImageMagick + GIMP):
        #   convert easy.png -set colorspace RGB -alpha extract easy_alpha.png
        #   convert easy_alpha.png easy_alpha.svg
        #   convert the source PNG to a temporary TIFF
        #   clip the TIFF with the SVG via GIMP's svg-clip-path script
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.tiff"
output_filepath = os.path.join(clean_data['storage'], output_filename)
output_alpha_filepath = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_alpha.png")
command_extract_alpha = f'convert {input_filepath} -set colorspace RGB -alpha extract {output_alpha_filepath}'
output_svg_filepath = f'{"".join(output_alpha_filepath.split(".")[:-1])}.svg'
command_alpha_svg = f'convert {output_alpha_filepath} {output_svg_filepath}'
output_tiff_tmp_filepath = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_tmp.tiff")
command_png_to_tiff = f'convert {input_filepath} {output_tiff_tmp_filepath}'
logger.info(f'command: {command_extract_alpha}')
process = subprocess.Popen(
command_extract_alpha.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_alpha_svg}')
process = subprocess.Popen(
command_alpha_svg.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_png_to_tiff}')
process = subprocess.Popen(
command_png_to_tiff.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
gimp_command = f"gimp -i -b '(svg-clip-path \"{output_tiff_tmp_filepath}\" \"{output_svg_filepath}\" \"{output_filepath}\" )' -b '(gimp-quit 0)'"
logger.info(f'command: {gimp_command}')
process = subprocess.Popen(
gimp_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(20)
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
os.remove(output_alpha_filepath)
os.remove(output_svg_filepath)
os.remove(output_tiff_tmp_filepath)
return output_filepath, output_filename
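# A possible consolidation (sketch only, not used by the classes here): the
# repeated Popen/wait/log blocks above could become one helper that fails fast.
# The name _run_step is hypothetical; subprocess.run and
# CompletedProcess.check_returncode are standard library.
def _run_step(command, timeout=10, shell=False):
    """Run one conversion step, log the result, and raise on failure."""
    result = subprocess.run(
        command if shell else command.split(' '),
        shell=shell,
        capture_output=True,
        timeout=timeout,
    )
    logger.info(f'command: {command} -> returncode: {result.returncode}')
    result.check_returncode()  # raises CalledProcessError when returncode != 0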
class Tiff2Png(ImgProcessAPIView):
@property
def return_format(self):
return 'png'
def process_request(self, clean_data, request):
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.png"
output_filepath = os.path.join(clean_data['storage'], output_filename)
command = f'convert {input_filepath} -alpha transparent -clip -alpha opaque {output_filepath}'
process = subprocess.Popen(
command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'command: {command}')
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
return output_filepath, output_filename
class Eps2Png(ImgProcessAPIView):
@property
def return_format(self):
return 'png'
def process_request(self, clean_data, request):
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.png"
output_filepath = os.path.join(clean_data['storage'], output_filename)
command = f'convert {input_filepath} -alpha transparent -clip -alpha opaque {output_filepath}'
process = subprocess.Popen(
command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
|
logger.info(f'command: {command}')
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
return output_filepath, output_filename
class Png2Eps(ImgProcessAPIView):
@property
def return_format(self):
return 'postscript'
def process_request(self, clean_data, request):
        # TODO: convert png-alpha to svg
        # Pipeline (ImageMagick + GIMP), as in Png2Tiff plus a final TIFF -> EPS step:
        #   convert easy.png -set colorspace RGB -alpha extract easy_alpha.png
        #   convert easy_alpha.png easy_alpha.svg
        #   convert the source PNG to a temporary TIFF
        #   clip the TIFF with the SVG via GIMP, then convert the result to EPS
input_filepath = os.path.join(clean_data['storage'], clean_data['filename'])
output_filename = f"{''.join(clean_data['filename'].split('.')[:-1])}.eps"
output_filepath = os.path.join(clean_data['storage'], output_filename)
output_alpha_filepath = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_alpha.png")
command_extract_alpha = f'convert {input_filepath} -set colorspace RGB -alpha extract {output_alpha_filepath}'
output_svg_filepath = f'{"".join(output_alpha_filepath.split(".")[:-1])}.svg'
command_alpha_svg = f'convert {output_alpha_filepath} {output_svg_filepath}'
output_tiff_tmp_filepath = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_tmp.tiff")
output_filepath_tiff = os.path.join(clean_data['storage'], f"{''.join(clean_data['filename'].split('.')[:-1])}_final.tiff")
command_png_to_tiff = f'convert {input_filepath} {output_tiff_tmp_filepath}'
command_tiff_to_eps = f'convert {output_filepath_tiff} {output_filepath}'
logger.info(f'command: {command_extract_alpha}')
process = subprocess.Popen(
command_extract_alpha.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_alpha_svg}')
process = subprocess.Popen(
command_alpha_svg.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_png_to_tiff}')
process = subprocess.Popen(
command_png_to_tiff.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
gimp_command = f"gimp -i -b '(svg-clip-path \"{output_tiff_tmp_filepath}\" \"{output_svg_filepath}\" \"{output_filepath_tiff}\" )' -b '(gimp-quit 0)'"
logger.info(f'command: {gimp_command}')
process = subprocess.Popen(
gimp_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(20)
logger.info(f'process resultcode: {process.returncode}')
logger.info(f'command: {command_tiff_to_eps}')
process = subprocess.Popen(
command_tiff_to_eps.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
process.wait(10)
logger.info(f'process resultcode: {process.returncode}')
os.remove(input_filepath)
os.remove(output_alpha_filepath)
os.remove(output_svg_filepath)
os.remove(output_tiff_tmp_filepath)
os.remove(output_filepath_tiff)
return output_filepath, output_filename
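# Example client call (sketch; the actual route depends on this project's
# urls.py, so the path below is hypothetical):
#   import requests
#   with open('easy.png', 'rb') as f:
#       r = requests.post('https://<host>/png2eps?output=url', files={'source': f})
#   # output=url answers 303 with a Location header; output=image streams the file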
| |
read_epg_json.py
|
import sys
import json
from datetime import datetime
lastEnd=0
|
p=data['details'][time]
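    # NB: formatting (unixTimeEnd - unixTimeBegin) through utcfromtimestamp only
    # renders the programme duration correctly when it is shorter than 24 hours.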
print('{0} {1} {2}-{3}'.format(
datetime.utcfromtimestamp(p['unixTimeBegin']).strftime('%Y-%m-%d %H:%M'),
datetime.utcfromtimestamp(p['unixTimeEnd']-p['unixTimeBegin']).strftime('%H:%M'),
p['name'],
p['title']
)
)
if lastEnd != p['unixTimeBegin']:
print('--------------------')
lastEnd=p['unixTimeEnd']
|
with open(sys.argv[1]) as json_file:
data = json.load(json_file)
times=sorted(list(data['details']))
for time in times:
|
horizontal_pod_autoscaler_condition.rs
|
// Generated from definition io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition
/// HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct HorizontalPodAutoscalerCondition {
/// lastTransitionTime is the last time the condition transitioned from one status to another
pub last_transition_time: Option<crate::apimachinery::pkg::apis::meta::v1::Time>,
/// message is a human-readable explanation containing details about the transition
pub message: Option<String>,
/// reason is the reason for the condition's last transition.
pub reason: Option<String>,
/// status is the status of the condition (True, False, Unknown)
pub status: String,
/// type describes the current condition
pub type_: String,
}
impl<'de> crate::serde::Deserialize<'de> for HorizontalPodAutoscalerCondition {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_last_transition_time,
Key_message,
Key_reason,
Key_status,
Key_type_,
Other,
}
impl<'de> crate::serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn
|
(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
Ok(match v {
"lastTransitionTime" => Field::Key_last_transition_time,
"message" => Field::Key_message,
"reason" => Field::Key_reason,
"status" => Field::Key_status,
"type" => Field::Key_type_,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = HorizontalPodAutoscalerCondition;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("HorizontalPodAutoscalerCondition")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
let mut value_last_transition_time: Option<crate::apimachinery::pkg::apis::meta::v1::Time> = None;
let mut value_message: Option<String> = None;
let mut value_reason: Option<String> = None;
let mut value_status: Option<String> = None;
let mut value_type_: Option<String> = None;
while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_last_transition_time => value_last_transition_time = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_message => value_message = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_reason => value_reason = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_status => value_status = Some(crate::serde::de::MapAccess::next_value(&mut map)?),
Field::Key_type_ => value_type_ = Some(crate::serde::de::MapAccess::next_value(&mut map)?),
Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(HorizontalPodAutoscalerCondition {
last_transition_time: value_last_transition_time,
message: value_message,
reason: value_reason,
status: value_status.ok_or_else(|| crate::serde::de::Error::missing_field("status"))?,
type_: value_type_.ok_or_else(|| crate::serde::de::Error::missing_field("type"))?,
})
}
}
deserializer.deserialize_struct(
"HorizontalPodAutoscalerCondition",
&[
"lastTransitionTime",
"message",
"reason",
"status",
"type",
],
Visitor,
)
}
}
impl crate::serde::Serialize for HorizontalPodAutoscalerCondition {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
let mut state = serializer.serialize_struct(
"HorizontalPodAutoscalerCondition",
2 +
self.last_transition_time.as_ref().map_or(0, |_| 1) +
self.message.as_ref().map_or(0, |_| 1) +
self.reason.as_ref().map_or(0, |_| 1),
)?;
if let Some(value) = &self.last_transition_time {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "lastTransitionTime", value)?;
}
if let Some(value) = &self.message {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "message", value)?;
}
if let Some(value) = &self.reason {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "reason", value)?;
}
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "status", &self.status)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "type", &self.type_)?;
crate::serde::ser::SerializeStruct::end(state)
}
}
|
expecting
|
health.go
|
// Package health exposes health check handlers.
package health
import (
"net/http"
"github.com/gin-gonic/gin"
)
// Controller handles health check requests.
type Controller struct{}
// Ping godoc
// @Summary ping function
// @Description run health-check ping request
// @Tags health
// @Accept json
// @Produce json
|
c.String(http.StatusOK, "pong")
}
|
// @Success 200 {string} pong
// @Router /health/ping [get]
// Ping responds to a user ping request.
func (p *Controller) Ping(c *gin.Context) {
|
iterator_test.go
|
package kivik
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"testing"
"time"
"github.com/flimzy/diff"
"github.com/flimzy/testy"
)
type TestFeed struct {
max int64
i int64
closeErr error
}
var _ iterator = &TestFeed{}
func (f *TestFeed) Close() error { return f.closeErr }
func (f *TestFeed) Next(ifce interface{}) error {
i, ok := ifce.(*int64)
if ok {
*i = f.i
f.i++
if f.i > f.max {
return io.EOF
}
time.Sleep(5 * time.Millisecond)
return nil
}
panic(fmt.Sprintf("unknown type: %T", ifce))
}
func TestIterator(t *testing.T)
|
func TestCancelledIterator(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
iter := newIterator(ctx, &TestFeed{max: 10000}, func() interface{} { var i int64; return &i }())
for iter.Next() {
}
if err := iter.Err(); err.Error() != "context deadline exceeded" {
t.Errorf("Unexpected error: %s", err)
}
}
func TestIteratorScan(t *testing.T) {
type Test struct {
name string
dst interface{}
input json.RawMessage
expected interface{}
status int
err string
}
tests := []Test{
{
name: "non-pointer",
dst: map[string]string{},
input: []byte(`{"foo":123.4}`),
status: http.StatusBadRequest,
err: "kivik: destination is not a pointer",
},
func() Test {
dst := map[string]interface{}{}
expected := map[string]interface{}{"foo": 123.4}
return Test{
name: "standard unmarshal",
dst: &dst,
input: []byte(`{"foo":123.4}`),
expected: &expected,
}
}(),
func() Test {
dst := map[string]interface{}{}
return Test{
name: "invalid JSON",
dst: &dst,
input: []byte(`invalid JSON`),
status: http.StatusInternalServerError,
err: "invalid character 'i' looking for beginning of value",
}
}(),
func() Test {
var dst *json.RawMessage
return Test{
name: "nil *json.RawMessage",
dst: dst,
input: []byte(`{"foo":123.4}`),
status: http.StatusBadRequest,
err: "kivik: destination pointer is nil",
}
}(),
func() Test {
var dst *[]byte
return Test{
name: "nil *[]byte",
dst: dst,
input: []byte(`{"foo":123.4}`),
status: http.StatusBadRequest,
err: "kivik: destination pointer is nil",
}
}(),
func() Test {
dst := []byte{}
expected := []byte(`{"foo":123.4}`)
return Test{
name: "[]byte",
dst: &dst,
input: []byte(`{"foo":123.4}`),
expected: &expected,
}
}(),
func() Test {
dst := json.RawMessage{}
expected := json.RawMessage(`{"foo":123.4}`)
return Test{
name: "json.RawMessage",
dst: &dst,
input: []byte(`{"foo":123.4}`),
expected: &expected,
}
}(),
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
err := scan(test.dst, test.input)
testy.StatusError(t, test.err, test.status, err)
if d := diff.Interface(test.expected, test.dst); d != nil {
t.Error(d)
}
})
}
}
|
{
iter := newIterator(context.Background(), &TestFeed{max: 10}, func() interface{} { var i int64; return &i }())
expected := []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
result := []int64{}
for iter.Next() {
val, ok := iter.curVal.(*int64)
if !ok {
panic("Unexpected type")
}
result = append(result, *val)
}
if err := iter.Err(); err != nil {
t.Errorf("Unexpected error: %s", err)
}
if d := diff.AsJSON(expected, result); d != nil {
t.Errorf("Unexpected result:\n%s\n", d)
}
}
|
model_utils.py
|
"""
Ibutsu API
A system to store and query test results # noqa: E501
The version of the OpenAPI document: 1.13.4
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import inspect
import io
import os
import pprint
import re
import tempfile
from dateutil.parser import parse
from ibutsu_client.exceptions import (
ApiKeyError,
ApiAttributeError,
ApiTypeError,
ApiValueError,
)
none_type = type(None)
file_type = io.IOBase
def convert_js_args_to_python_args(fn):
from functools import wraps
@wraps(fn)
def wrapped_init(_self, *args, **kwargs):
"""
        An attribute named `self` received from the api will conflict with the reserved `self`
parameter of a class method. During generation, `self` attributes are mapped
to `_self` in models. Here, we name `_self` instead of `self` to avoid conflicts.
"""
spec_property_naming = kwargs.get('_spec_property_naming', False)
if spec_property_naming:
kwargs = change_keys_js_to_python(kwargs, _self if isinstance(_self, type) else _self.__class__)
return fn(_self, *args, **kwargs)
return wrapped_init
class cached_property(object):
# this caches the result of the function call for fn with no inputs
# use this as a decorator on function methods that you want converted
# into cached properties
result_key = '_results'
def __init__(self, fn):
self._fn = fn
def __get__(self, instance, cls=None):
if self.result_key in vars(self):
return vars(self)[self.result_key]
else:
result = self._fn()
setattr(self, self.result_key, result)
return result
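# Usage sketch: in the generated models the decorated function is defined
# without arguments, because __get__ above calls self._fn() with none, e.g.
#   class ExampleModel(ModelNormal):      # hypothetical model
#       @cached_property
#       def openapi_types():
#           return {'name': (str,)}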
PRIMITIVE_TYPES = (list, float, int, bool, datetime, date, str, file_type)
def allows_single_value_input(cls):
"""
This function returns True if the input composed schema model or any
descendant model allows a value only input
This is true for cases where oneOf contains items like:
oneOf:
- float
- NumberWithValidation
- StringEnum
- ArrayModel
- null
TODO: lru_cache this
"""
if (
issubclass(cls, ModelSimple) or
cls in PRIMITIVE_TYPES
):
return True
elif issubclass(cls, ModelComposed):
if not cls._composed_schemas['oneOf']:
return False
return any(allows_single_value_input(c) for c in cls._composed_schemas['oneOf'])
return False
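# Example: for a composed schema whose oneOf is (float, StringEnum), this
# returns True, so OpenApiModel.__new__ below can route a bare value such as
# Model(123.4) straight to get_oneof_instance().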
def composed_model_input_classes(cls):
"""
This function returns a list of the possible models that can be accepted as
inputs.
TODO: lru_cache this
"""
if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
return [cls]
elif issubclass(cls, ModelNormal):
if cls.discriminator is None:
return [cls]
else:
return get_discriminated_classes(cls)
elif issubclass(cls, ModelComposed):
if not cls._composed_schemas['oneOf']:
return []
if cls.discriminator is None:
input_classes = []
for c in cls._composed_schemas['oneOf']:
input_classes.extend(composed_model_input_classes(c))
return input_classes
else:
return get_discriminated_classes(cls)
return []
class OpenApiModel(object):
"""The base class for all OpenAPIModels"""
def set_attribute(self, name, value):
# this is only used to set properties on self
path_to_item = []
if self._path_to_item:
path_to_item.extend(self._path_to_item)
path_to_item.append(name)
if name in self.openapi_types:
required_types_mixed = self.openapi_types[name]
elif self.additional_properties_type is None:
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
path_to_item
)
elif self.additional_properties_type is not None:
required_types_mixed = self.additional_properties_type
if get_simple_class(name) != str:
error_msg = type_error_message(
var_name=name,
var_value=name,
valid_classes=(str,),
key_type=True
)
raise ApiTypeError(
error_msg,
path_to_item=path_to_item,
valid_classes=(str,),
key_type=True
)
if self._check_type:
value = validate_and_convert_types(
value, required_types_mixed, path_to_item, self._spec_property_naming,
self._check_type, configuration=self._configuration)
if (name,) in self.allowed_values:
check_allowed_values(
self.allowed_values,
(name,),
value
)
if (name,) in self.validations:
check_validations(
self.validations,
(name,),
value,
self._configuration
)
self.__dict__['_data_store'][name] = value
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
def __setattr__(self, attr, value):
"""set the value of an attribute using dot notation: `instance.attr = val`"""
self[attr] = value
def __getattr__(self, attr):
"""get the value of an attribute using dot notation: `instance.attr`"""
return self.__getitem__(attr)
def __new__(cls, *args, **kwargs):
# this function uses the discriminator to
# pick a new schema/class to instantiate because a discriminator
# propertyName value was passed in
if len(args) == 1:
arg = args[0]
if arg is None and is_type_nullable(cls):
# The input data is the 'null' value and the type is nullable.
return None
if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
model_kwargs = {}
oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
return oneof_instance
visited_composed_classes = kwargs.get('_visited_composed_classes', ())
if (
cls.discriminator is None or
cls in visited_composed_classes
):
# Use case 1: this openapi schema (cls) does not have a discriminator
# Use case 2: we have already visited this class before and are sure that we
# want to instantiate it this time. We have visited this class deserializing
# a payload with a discriminator. During that process we traveled through
# this class but did not make an instance of it. Now we are making an
# instance of a composed class which contains cls in it, so this time make an instance of cls.
#
# Here's an example of use case 2: If Animal has a discriminator
# petType and we pass in "Dog", and the class Dog
# allOf includes Animal, we move through Animal
# once using the discriminator, and pick Dog.
            # Then in the composed schema Dog, we will make an instance of the
            # Animal class (because Dog has allOf: Animal) but this time we won't travel
# through Animal's discriminator because we passed in
# _visited_composed_classes = (Animal,)
return super(OpenApiModel, cls).__new__(cls)
# Get the name and value of the discriminator property.
# The discriminator name is obtained from the discriminator meta-data
# and the discriminator value is obtained from the input data.
discr_propertyname_py = list(cls.discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if discr_propertyname_js in kwargs:
discr_value = kwargs[discr_propertyname_js]
elif discr_propertyname_py in kwargs:
discr_value = kwargs[discr_propertyname_py]
else:
# The input data does not contain the discriminator property.
path_to_item = kwargs.get('_path_to_item', ())
raise ApiValueError(
"Cannot deserialize input data due to missing discriminator. "
"The discriminator property '%s' is missing at path: %s" %
(discr_propertyname_js, path_to_item)
)
# Implementation note: the last argument to get_discriminator_class
# is a list of visited classes. get_discriminator_class may recursively
# call itself and update the list of visited classes, and the initial
# value must be an empty list. Hence not using 'visited_composed_classes'
new_cls = get_discriminator_class(
cls, discr_propertyname_py, discr_value, [])
if new_cls is None:
path_to_item = kwargs.get('_path_to_item', ())
disc_prop_value = kwargs.get(
discr_propertyname_js, kwargs.get(discr_propertyname_py))
raise ApiValueError(
"Cannot deserialize input data due to invalid discriminator "
"value. The OpenAPI document has no mapping for discriminator "
"property '%s'='%s' at path: %s" %
(discr_propertyname_js, disc_prop_value, path_to_item)
)
if new_cls in visited_composed_classes:
# if we are making an instance of a composed schema Descendent
# which allOf includes Ancestor, then Ancestor contains
# a discriminator that includes Descendent.
# So if we make an instance of Descendent, we have to make an
# instance of Ancestor to hold the allOf properties.
# This code detects that use case and makes the instance of Ancestor
# For example:
# When making an instance of Dog, _visited_composed_classes = (Dog,)
# then we make an instance of Animal to include in dog._composed_instances
# so when we are here, cls is Animal
# cls.discriminator != None
# cls not in _visited_composed_classes
# new_cls = Dog
            # but we know that we already have Dog
# because it is in visited_composed_classes
# so make Animal here
return super(OpenApiModel, cls).__new__(cls)
# Build a list containing all oneOf and anyOf descendants.
oneof_anyof_classes = None
if cls._composed_schemas is not None:
oneof_anyof_classes = (
cls._composed_schemas.get('oneOf', ()) +
cls._composed_schemas.get('anyOf', ()))
oneof_anyof_child = new_cls in oneof_anyof_classes
kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
if cls._composed_schemas.get('allOf') and oneof_anyof_child:
# Validate that we can make self because when we make the
# new_cls it will not include the allOf validations in self
self_inst = super(OpenApiModel, cls).__new__(cls)
self_inst.__init__(*args, **kwargs)
new_inst = new_cls.__new__(new_cls, *args, **kwargs)
new_inst.__init__(*args, **kwargs)
return new_inst
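    # Concrete sketch of the discriminator path above (class names hypothetical):
    # with Animal.discriminator == {'pet_type': {'Dog': Dog}}, calling
    # Animal(pet_type='Dog', ...) resolves new_cls = Dog via
    # get_discriminator_class() and returns a fully initialised Dog instance.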
@classmethod
@convert_js_args_to_python_args
def _new_from_openapi_data(cls, *args, **kwargs):
# this function uses the discriminator to
# pick a new schema/class to instantiate because a discriminator
# propertyName value was passed in
if len(args) == 1:
arg = args[0]
if arg is None and is_type_nullable(cls):
# The input data is the 'null' value and the type is nullable.
return None
if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
model_kwargs = {}
oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
return oneof_instance
visited_composed_classes = kwargs.get('_visited_composed_classes', ())
if (
cls.discriminator is None or
cls in visited_composed_classes
):
# Use case 1: this openapi schema (cls) does not have a discriminator
# Use case 2: we have already visited this class before and are sure that we
# want to instantiate it this time. We have visited this class deserializing
# a payload with a discriminator. During that process we traveled through
# this class but did not make an instance of it. Now we are making an
# instance of a composed class which contains cls in it, so this time make an instance of cls.
#
# Here's an example of use case 2: If Animal has a discriminator
# petType and we pass in "Dog", and the class Dog
# allOf includes Animal, we move through Animal
# once using the discriminator, and pick Dog.
            # Then in the composed schema Dog, we will make an instance of the
            # Animal class (because Dog has allOf: Animal) but this time we won't travel
# through Animal's discriminator because we passed in
# _visited_composed_classes = (Animal,)
return cls._from_openapi_data(*args, **kwargs)
# Get the name and value of the discriminator property.
# The discriminator name is obtained from the discriminator meta-data
# and the discriminator value is obtained from the input data.
discr_propertyname_py = list(cls.discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if discr_propertyname_js in kwargs:
discr_value = kwargs[discr_propertyname_js]
elif discr_propertyname_py in kwargs:
discr_value = kwargs[discr_propertyname_py]
else:
# The input data does not contain the discriminator property.
path_to_item = kwargs.get('_path_to_item', ())
raise ApiValueError(
"Cannot deserialize input data due to missing discriminator. "
"The discriminator property '%s' is missing at path: %s" %
(discr_propertyname_js, path_to_item)
)
# Implementation note: the last argument to get_discriminator_class
# is a list of visited classes. get_discriminator_class may recursively
# call itself and update the list of visited classes, and the initial
# value must be an empty list. Hence not using 'visited_composed_classes'
new_cls = get_discriminator_class(
cls, discr_propertyname_py, discr_value, [])
if new_cls is None:
path_to_item = kwargs.get('_path_to_item', ())
disc_prop_value = kwargs.get(
discr_propertyname_js, kwargs.get(discr_propertyname_py))
raise ApiValueError(
"Cannot deserialize input data due to invalid discriminator "
"value. The OpenAPI document has no mapping for discriminator "
"property '%s'='%s' at path: %s" %
(discr_propertyname_js, disc_prop_value, path_to_item)
)
if new_cls in visited_composed_classes:
# if we are making an instance of a composed schema Descendent
# which allOf includes Ancestor, then Ancestor contains
# a discriminator that includes Descendent.
# So if we make an instance of Descendent, we have to make an
# instance of Ancestor to hold the allOf properties.
# This code detects that use case and makes the instance of Ancestor
# For example:
# When making an instance of Dog, _visited_composed_classes = (Dog,)
# then we make an instance of Animal to include in dog._composed_instances
# so when we are here, cls is Animal
# cls.discriminator != None
# cls not in _visited_composed_classes
# new_cls = Dog
            # but we know that we already have Dog
# because it is in visited_composed_classes
# so make Animal here
return cls._from_openapi_data(*args, **kwargs)
# Build a list containing all oneOf and anyOf descendants.
oneof_anyof_classes = None
if cls._composed_schemas is not None:
oneof_anyof_classes = (
cls._composed_schemas.get('oneOf', ()) +
cls._composed_schemas.get('anyOf', ()))
oneof_anyof_child = new_cls in oneof_anyof_classes
kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
        if oneof_anyof_child and cls._composed_schemas.get('allOf'):
# Validate that we can make self because when we make the
# new_cls it will not include the allOf validations in self
self_inst = cls._from_openapi_data(*args, **kwargs)
new_inst = new_cls._new_from_openapi_data(*args, **kwargs)
return new_inst
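
# Editor's note: a minimal sketch of the discriminator flow described in the
# comments above. `Animal`/`Dog` style classes are hypothetical generated
# models, not part of this module; this helper only illustrates how a
# payload's discriminator value selects the concrete class.
def _example_discriminator_dispatch(animal_cls, payload):
    # e.g. payload = {'petType': 'Dog', 'name': 'Rex'} for a hypothetical
    # Animal schema whose discriminator property is petType
    discr_name_py = list(animal_cls.discriminator.keys())[0]
    discr_value = payload[animal_cls.attribute_map[discr_name_py]]
    # the visited-class list starts empty, mirroring the call above
    return get_discriminator_class(animal_cls, discr_name_py, discr_value, [])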
class ModelSimple(OpenApiModel):
"""the parent class of models whose type != object in their
swagger/openapi"""
def __setitem__(self, name, value):
"""set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
if name in self.required_properties:
self.__dict__[name] = value
return
self.set_attribute(name, value)
def get(self, name, default=None):
"""returns the value of an attribute or some default value if the attribute was not set"""
if name in self.required_properties:
return self.__dict__[name]
return self.__dict__['_data_store'].get(name, default)
def __getitem__(self, name):
"""get the value of an attribute using square-bracket notation: `instance[attr]`"""
if name in self:
return self.get(name)
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
def __contains__(self, name):
"""used by `in` operator to check if an attrbute value was set in an instance: `'attr' in instance`"""
if name in self.required_properties:
return name in self.__dict__
return name in self.__dict__['_data_store']
def to_str(self):
"""Returns the string representation of the model"""
return str(self.value)
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, self.__class__):
return False
this_val = self._data_store['value']
that_val = other._data_store['value']
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
return vals_equal
class ModelNormal(OpenApiModel):
"""the parent class of models whose type == object in their
swagger/openapi"""
def __setitem__(self, name, value):
"""set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
if name in self.required_properties:
self.__dict__[name] = value
return
self.set_attribute(name, value)
def get(self, name, default=None):
"""returns the value of an attribute or some default value if the attribute was not set"""
if name in self.required_properties:
return self.__dict__[name]
return self.__dict__['_data_store'].get(name, default)
def __getitem__(self, name):
"""get the value of an attribute using square-bracket notation: `instance[attr]`"""
if name in self:
return self.get(name)
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
def __contains__(self, name):
"""used by `in` operator to check if an attrbute value was set in an instance: `'attr' in instance`"""
if name in self.required_properties:
return name in self.__dict__
return name in self.__dict__['_data_store']
def to_dict(self):
"""Returns the model properties as a dict"""
return model_to_dict(self, serialize=False)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, self.__class__):
return False
if not set(self._data_store.keys()) == set(other._data_store.keys()):
return False
for _var_name, this_val in self._data_store.items():
that_val = other._data_store[_var_name]
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
if not vals_equal:
return False
return True
class ModelComposed(OpenApiModel):
"""the parent class of models whose type == object in their
swagger/openapi and have oneOf/allOf/anyOf
When one sets a property we use var_name_to_model_instances to store the value in
the correct class instances + run any type checking + validation code.
When one gets a property we use var_name_to_model_instances to get the value
from the correct class instances.
This allows multiple composed schemas to contain the same property with additive
constraints on the value.
_composed_schemas (dict) stores the anyOf/allOf/oneOf classes
key (str): allOf/oneOf/anyOf
value (list): the classes in the XOf definition.
Note: none_type can be included when the openapi document version >= 3.1.0
_composed_instances (list): stores a list of instances of the composed schemas
defined in _composed_schemas. When properties are accessed in the self instance,
they are returned from the self._data_store or the data stores in the instances
in self._composed_schemas
_var_name_to_model_instances (dict): maps between a variable name on self and
the composed instances (self included) which contain that data
key (str): property name
value (list): list of class instances, self or instances in _composed_instances
which contain the value that the key is referring to.
"""
def __setitem__(self, name, value):
"""set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
if name in self.required_properties:
self.__dict__[name] = value
return
"""
Use cases:
1. additional_properties_type is None (additionalProperties == False in spec)
Check for property presence in self.openapi_types
if not present then throw an error
if present set in self, set attribute
always set on composed schemas
2. additional_properties_type exists
set attribute on self
always set on composed schemas
"""
if self.additional_properties_type is None:
"""
For an attribute to exist on a composed schema it must:
- fulfill schema_requirements in the self composed schema not considering oneOf/anyOf/allOf schemas AND
- fulfill schema_requirements in each oneOf/anyOf/allOf schemas
schema_requirements:
For an attribute to exist on a schema it must:
- be present in properties at the schema OR
- have additionalProperties unset (defaults additionalProperties = any type) OR
- have additionalProperties set
"""
if name not in self.openapi_types:
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
# attribute must be set on self and composed instances
self.set_attribute(name, value)
for model_instance in self._composed_instances:
setattr(model_instance, name, value)
if name not in self._var_name_to_model_instances:
# we assigned an additional property
self.__dict__['_var_name_to_model_instances'][name] = self._composed_instances + [self]
return None
__unset_attribute_value__ = object()
def get(self, name, default=None):
"""returns the value of an attribute or some default value if the attribute was not set"""
if name in self.required_properties:
return self.__dict__[name]
# get the attribute from the correct instance
model_instances = self._var_name_to_model_instances.get(name)
values = []
# A composed model stores self and child (oneof/anyOf/allOf) models under
# self._var_name_to_model_instances.
# Any property must exist in self and all model instances
# The value stored in all model instances must be the same
if model_instances:
for model_instance in model_instances:
if name in model_instance._data_store:
v = model_instance._data_store[name]
if v not in values:
values.append(v)
len_values = len(values)
if len_values == 0:
return default
elif len_values == 1:
return values[0]
elif len_values > 1:
raise ApiValueError(
"Values stored for property {0} in {1} differ when looking "
"at self and self's composed instances. All values must be "
"the same".format(name, type(self).__name__),
[e for e in [self._path_to_item, name] if e]
)
def __getitem__(self, name):
"""get the value of an attribute using square-bracket notation: `instance[attr]`"""
value = self.get(name, self.__unset_attribute_value__)
if value is self.__unset_attribute_value__:
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
return value
def __contains__(self, name):
"""used by `in` operator to check if an attrbute value was set in an instance: `'attr' in instance`"""
if name in self.required_properties:
return name in self.__dict__
model_instances = self._var_name_to_model_instances.get(
name, self._additional_properties_model_instances)
if model_instances:
for model_instance in model_instances:
if name in model_instance._data_store:
return True
return False
def to_dict(self):
"""Returns the model properties as a dict"""
return model_to_dict(self, serialize=False)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, self.__class__):
return False
if not set(self._data_store.keys()) == set(other._data_store.keys()):
return False
for _var_name, this_val in self._data_store.items():
that_val = other._data_store[_var_name]
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
if not vals_equal:
return False
return True
COERCION_INDEX_BY_TYPE = {
ModelComposed: 0,
ModelNormal: 1,
ModelSimple: 2,
none_type: 3, # The type of 'None'.
list: 4,
dict: 5,
float: 6,
int: 7,
bool: 8,
datetime: 9,
date: 10,
str: 11,
file_type: 12, # 'file_type' is an alias for the built-in 'file' or 'io.IOBase' type.
}
# these are used to limit what type conversions we try to do
# when we have a valid type already and we want to try converting
# to another type
UPCONVERSION_TYPE_PAIRS = (
(str, datetime),
(str, date),
(int, float), # A float may be serialized as an integer, e.g. '3' is a valid serialized float.
(list, ModelComposed),
(dict, ModelComposed),
(str, ModelComposed),
(int, ModelComposed),
(float, ModelComposed),
(list, ModelNormal),
(dict, ModelNormal),
(str, ModelSimple),
(int, ModelSimple),
(float, ModelSimple),
(list, ModelSimple),
)
COERCIBLE_TYPE_PAIRS = {
False: ( # client instantiation of a model with client data
# (dict, ModelComposed),
# (list, ModelComposed),
# (dict, ModelNormal),
# (list, ModelNormal),
# (str, ModelSimple),
# (int, ModelSimple),
# (float, ModelSimple),
# (list, ModelSimple),
# (str, int),
# (str, float),
# (str, datetime),
# (str, date),
# (int, str),
# (float, str),
),
True: ( # server -> client data
(dict, ModelComposed),
(list, ModelComposed),
(dict, ModelNormal),
(list, ModelNormal),
(str, ModelSimple),
(int, ModelSimple),
(float, ModelSimple),
(list, ModelSimple),
# (str, int),
# (str, float),
(str, datetime),
(str, date),
# (int, str),
# (float, str),
(str, file_type)
),
}
def
|
(input_value):
"""Returns an input_value's simple class that we will use for type checking
Python2:
float and int will return int, where int is the python3 int backport
str and unicode will return str, where str is the python3 str backport
Note: float and int ARE both instances of int backport
Note: str_py2 and unicode_py2 are NOT both instances of str backport
Args:
input_value (class/class_instance): the item for which we will return
the simple class
"""
if isinstance(input_value, type):
# input_value is a class
return input_value
elif isinstance(input_value, tuple):
return tuple
elif isinstance(input_value, list):
return list
elif isinstance(input_value, dict):
return dict
elif isinstance(input_value, none_type):
return none_type
elif isinstance(input_value, file_type):
return file_type
elif isinstance(input_value, bool):
# this must be higher than the int check because
# isinstance(True, int) == True
return bool
elif isinstance(input_value, int):
return int
elif isinstance(input_value, datetime):
# this must be higher than the date check because
# isinstance(datetime_instance, date) == True
return datetime
elif isinstance(input_value, date):
return date
elif isinstance(input_value, str):
return str
return type(input_value)
def check_allowed_values(allowed_values, input_variable_path, input_values):
"""Raises an exception if the input_values are not allowed
Args:
allowed_values (dict): the allowed_values dict
input_variable_path (tuple): the path to the input variable
input_values (list/str/int/float/date/datetime): the values that we
are checking to see if they are in allowed_values
"""
these_allowed_values = list(allowed_values[input_variable_path].values())
if (isinstance(input_values, list)
and not set(input_values).issubset(
set(these_allowed_values))):
invalid_values = ", ".join(
map(str, set(input_values) - set(these_allowed_values))),
raise ApiValueError(
"Invalid values for `%s` [%s], must be a subset of [%s]" %
(
input_variable_path[0],
invalid_values,
", ".join(map(str, these_allowed_values))
)
)
elif (isinstance(input_values, dict)
and not set(
input_values.keys()).issubset(set(these_allowed_values))):
invalid_values = ", ".join(
map(str, set(input_values.keys()) - set(these_allowed_values)))
raise ApiValueError(
"Invalid keys in `%s` [%s], must be a subset of [%s]" %
(
input_variable_path[0],
invalid_values,
", ".join(map(str, these_allowed_values))
)
)
elif (not isinstance(input_values, (list, dict))
and input_values not in these_allowed_values):
raise ApiValueError(
"Invalid value for `%s` (%s), must be one of %s" %
(
input_variable_path[0],
input_values,
these_allowed_values
)
)
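
# Editor's note: a small usage sketch, assuming a hypothetical enum property
# named 'status'. The allowed_values dict is keyed by the input variable
# path, matching what check_allowed_values above consumes.
def _example_check_allowed_values():
    allowed_values = {
        ('status',): {'ACTIVE': 'active', 'INACTIVE': 'inactive'},
    }
    check_allowed_values(allowed_values, ('status',), 'active')  # passes
    # check_allowed_values(allowed_values, ('status',), 'paused')
    # would raise ApiValueError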
def is_json_validation_enabled(schema_keyword, configuration=None):
"""Returns true if JSON schema validation is enabled for the specified
validation keyword. This can be used to skip JSON schema structural validation
as requested in the configuration.
Args:
schema_keyword (string): the name of a JSON schema validation keyword.
configuration (Configuration): the configuration class.
"""
return (configuration is None or
not hasattr(configuration, '_disabled_client_side_validations') or
schema_keyword not in configuration._disabled_client_side_validations)
def check_validations(
validations, input_variable_path, input_values,
configuration=None):
"""Raises an exception if the input_values are invalid
Args:
validations (dict): the validation dictionary.
input_variable_path (tuple): the path to the input variable.
input_values (list/str/int/float/date/datetime): the values that we
are checking.
configuration (Configuration): the configuration class.
"""
if input_values is None:
return
current_validations = validations[input_variable_path]
if (is_json_validation_enabled('multipleOf', configuration) and
'multiple_of' in current_validations and
isinstance(input_values, (int, float)) and
not (float(input_values) / current_validations['multiple_of']).is_integer()):
        # Note: 'multipleOf' is only as accurate as floating-point arithmetic allows.
raise ApiValueError(
"Invalid value for `%s`, value must be a multiple of "
"`%s`" % (
input_variable_path[0],
current_validations['multiple_of']
)
)
if (is_json_validation_enabled('maxLength', configuration) and
'max_length' in current_validations and
len(input_values) > current_validations['max_length']):
raise ApiValueError(
"Invalid value for `%s`, length must be less than or equal to "
"`%s`" % (
input_variable_path[0],
current_validations['max_length']
)
)
if (is_json_validation_enabled('minLength', configuration) and
'min_length' in current_validations and
len(input_values) < current_validations['min_length']):
raise ApiValueError(
"Invalid value for `%s`, length must be greater than or equal to "
"`%s`" % (
input_variable_path[0],
current_validations['min_length']
)
)
if (is_json_validation_enabled('maxItems', configuration) and
'max_items' in current_validations and
len(input_values) > current_validations['max_items']):
raise ApiValueError(
"Invalid value for `%s`, number of items must be less than or "
"equal to `%s`" % (
input_variable_path[0],
current_validations['max_items']
)
)
if (is_json_validation_enabled('minItems', configuration) and
'min_items' in current_validations and
len(input_values) < current_validations['min_items']):
        raise ApiValueError(
"Invalid value for `%s`, number of items must be greater than or "
"equal to `%s`" % (
input_variable_path[0],
current_validations['min_items']
)
)
items = ('exclusive_maximum', 'inclusive_maximum', 'exclusive_minimum',
'inclusive_minimum')
if (any(item in current_validations for item in items)):
if isinstance(input_values, list):
max_val = max(input_values)
min_val = min(input_values)
elif isinstance(input_values, dict):
max_val = max(input_values.values())
min_val = min(input_values.values())
else:
max_val = input_values
min_val = input_values
if (is_json_validation_enabled('exclusiveMaximum', configuration) and
'exclusive_maximum' in current_validations and
max_val >= current_validations['exclusive_maximum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value less than `%s`" % (
input_variable_path[0],
current_validations['exclusive_maximum']
)
)
if (is_json_validation_enabled('maximum', configuration) and
'inclusive_maximum' in current_validations and
max_val > current_validations['inclusive_maximum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value less than or equal to "
"`%s`" % (
input_variable_path[0],
current_validations['inclusive_maximum']
)
)
if (is_json_validation_enabled('exclusiveMinimum', configuration) and
'exclusive_minimum' in current_validations and
min_val <= current_validations['exclusive_minimum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value greater than `%s`" %
(
input_variable_path[0],
                    current_validations['exclusive_minimum']
)
)
if (is_json_validation_enabled('minimum', configuration) and
'inclusive_minimum' in current_validations and
min_val < current_validations['inclusive_minimum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value greater than or equal "
"to `%s`" % (
input_variable_path[0],
current_validations['inclusive_minimum']
)
)
flags = current_validations.get('regex', {}).get('flags', 0)
if (is_json_validation_enabled('pattern', configuration) and
'regex' in current_validations and
not re.search(current_validations['regex']['pattern'],
input_values, flags=flags)):
err_msg = r"Invalid value for `%s`, must match regular expression `%s`" % (
input_variable_path[0],
current_validations['regex']['pattern']
)
if flags != 0:
# Don't print the regex flags if the flags are not
# specified in the OAS document.
err_msg = r"%s with flags=`%s`" % (err_msg, flags)
raise ApiValueError(err_msg)
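
# Editor's note: a hedged sketch of the validations mapping consumed by
# check_validations. The property name 'page_size' and its bounds are
# illustrative, not taken from any real OpenAPI document.
def _example_check_validations():
    validations = {
        ('page_size',): {'inclusive_minimum': 1, 'inclusive_maximum': 100},
    }
    check_validations(validations, ('page_size',), 50)  # in bounds, no error
    # check_validations(validations, ('page_size',), 0)
    # would raise ApiValueError (below inclusive_minimum)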
def order_response_types(required_types):
"""Returns the required types sorted in coercion order
Args:
required_types (list/tuple): collection of classes or instance of
list or dict with class information inside it.
Returns:
(list): coercion order sorted collection of classes or instance
of list or dict with class information inside it.
"""
def index_getter(class_or_instance):
if isinstance(class_or_instance, list):
return COERCION_INDEX_BY_TYPE[list]
elif isinstance(class_or_instance, dict):
return COERCION_INDEX_BY_TYPE[dict]
elif (inspect.isclass(class_or_instance)
and issubclass(class_or_instance, ModelComposed)):
return COERCION_INDEX_BY_TYPE[ModelComposed]
elif (inspect.isclass(class_or_instance)
and issubclass(class_or_instance, ModelNormal)):
return COERCION_INDEX_BY_TYPE[ModelNormal]
elif (inspect.isclass(class_or_instance)
and issubclass(class_or_instance, ModelSimple)):
return COERCION_INDEX_BY_TYPE[ModelSimple]
elif class_or_instance in COERCION_INDEX_BY_TYPE:
return COERCION_INDEX_BY_TYPE[class_or_instance]
raise ApiValueError("Unsupported type: %s" % class_or_instance)
sorted_types = sorted(
required_types,
key=lambda class_or_instance: index_getter(class_or_instance)
)
return sorted_types
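
# Editor's note: illustrative only. order_response_types sorts candidate
# types by COERCION_INDEX_BY_TYPE, so model classes are attempted before
# containers and primitives during deserialization.
def _example_order_response_types():
    ordered = order_response_types((str, dict, none_type))
    assert ordered == [none_type, dict, str]
    return ordered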
def remove_uncoercible(required_types_classes, current_item, spec_property_naming,
must_convert=True):
"""Only keeps the type conversions that are possible
Args:
required_types_classes (tuple): tuple of classes that are required
these should be ordered by COERCION_INDEX_BY_TYPE
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
False if the variables names in the input data are python
variable names in PEP-8 snake case.
current_item (any): the current item (input data) to be converted
Keyword Args:
must_convert (bool): if True the item to convert is of the wrong
type and we want a big list of coercibles
if False, we want a limited list of coercibles
Returns:
(list): the remaining coercible required types, classes only
"""
current_type_simple = get_simple_class(current_item)
results_classes = []
for required_type_class in required_types_classes:
# convert our models to OpenApiModel
required_type_class_simplified = required_type_class
if isinstance(required_type_class_simplified, type):
if issubclass(required_type_class_simplified, ModelComposed):
required_type_class_simplified = ModelComposed
elif issubclass(required_type_class_simplified, ModelNormal):
required_type_class_simplified = ModelNormal
elif issubclass(required_type_class_simplified, ModelSimple):
required_type_class_simplified = ModelSimple
if required_type_class_simplified == current_type_simple:
# don't consider converting to one's own class
continue
class_pair = (current_type_simple, required_type_class_simplified)
if must_convert and class_pair in COERCIBLE_TYPE_PAIRS[spec_property_naming]:
results_classes.append(required_type_class)
elif class_pair in UPCONVERSION_TYPE_PAIRS:
results_classes.append(required_type_class)
return results_classes
def get_discriminated_classes(cls):
"""
Returns all the classes that a discriminator converts to
TODO: lru_cache this
"""
possible_classes = []
key = list(cls.discriminator.keys())[0]
if is_type_nullable(cls):
possible_classes.append(cls)
for discr_cls in cls.discriminator[key].values():
if hasattr(discr_cls, 'discriminator') and discr_cls.discriminator is not None:
possible_classes.extend(get_discriminated_classes(discr_cls))
else:
possible_classes.append(discr_cls)
return possible_classes
def get_possible_classes(cls, from_server_context):
# TODO: lru_cache this
possible_classes = [cls]
if from_server_context:
return possible_classes
if hasattr(cls, 'discriminator') and cls.discriminator is not None:
possible_classes = []
possible_classes.extend(get_discriminated_classes(cls))
elif issubclass(cls, ModelComposed):
possible_classes.extend(composed_model_input_classes(cls))
return possible_classes
def get_required_type_classes(required_types_mixed, spec_property_naming):
"""Converts the tuple required_types into a tuple and a dict described
below
Args:
required_types_mixed (tuple/list): will contain either classes or
instance of list or dict
spec_property_naming (bool): if True these values came from the
server, and we use the data types in our endpoints.
If False, we are client side and we need to include
oneOf and discriminator classes inside the data types in our endpoints
Returns:
(valid_classes, dict_valid_class_to_child_types_mixed):
valid_classes (tuple): the valid classes that the current item
should be
dict_valid_class_to_child_types_mixed (dict):
valid_class (class): this is the key
child_types_mixed (list/dict/tuple): describes the valid child
types
"""
valid_classes = []
child_req_types_by_current_type = {}
for required_type in required_types_mixed:
if isinstance(required_type, list):
valid_classes.append(list)
child_req_types_by_current_type[list] = required_type
elif isinstance(required_type, tuple):
valid_classes.append(tuple)
child_req_types_by_current_type[tuple] = required_type
elif isinstance(required_type, dict):
valid_classes.append(dict)
child_req_types_by_current_type[dict] = required_type[str]
else:
valid_classes.extend(get_possible_classes(required_type, spec_property_naming))
return tuple(valid_classes), child_req_types_by_current_type
def change_keys_js_to_python(input_dict, model_class):
"""
Converts from javascript_key keys in the input_dict to python_keys in
the output dict using the mapping in model_class.
    If the input_dict contains a key that is not declared in the model_class,
    the key is added to the output dict as-is. The assumption is that the
    model_class may have undeclared properties (i.e. the additionalProperties
    attribute in the OAS document).
"""
if getattr(model_class, 'attribute_map', None) is None:
return input_dict
output_dict = {}
reversed_attr_map = {value: key for key, value in
model_class.attribute_map.items()}
for javascript_key, value in input_dict.items():
python_key = reversed_attr_map.get(javascript_key)
if python_key is None:
            # if the key is unknown, it is either an error or an
            # additionalProperties variable
python_key = javascript_key
output_dict[python_key] = value
return output_dict
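
# Editor's note: a minimal sketch using a hypothetical model class. Keys
# found in attribute_map are renamed to their python names; unknown keys
# pass through unchanged (possible additionalProperties).
def _example_change_keys_js_to_python():
    class _PetModel:  # hypothetical, not a generated model
        attribute_map = {'pet_type': 'petType'}
    data = {'petType': 'Dog', 'extra': 1}
    return change_keys_js_to_python(data, _PetModel)
    # -> {'pet_type': 'Dog', 'extra': 1}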
def get_type_error(var_value, path_to_item, valid_classes, key_type=False):
error_msg = type_error_message(
var_name=path_to_item[-1],
var_value=var_value,
valid_classes=valid_classes,
key_type=key_type
)
return ApiTypeError(
error_msg,
path_to_item=path_to_item,
valid_classes=valid_classes,
key_type=key_type
)
def deserialize_primitive(data, klass, path_to_item):
"""Deserializes string to primitive type.
:param data: str/int/float
:param klass: str/class the class to convert to
:return: int, float, str, bool, date, datetime
"""
additional_message = ""
try:
if klass in {datetime, date}:
additional_message = (
"If you need your parameter to have a fallback "
"string value, please set its type as `type: {}` in your "
"spec. That allows the value to be any type. "
)
if klass == datetime:
if len(data) < 8:
raise ValueError("This is not a datetime")
# The string should be in iso8601 datetime format.
parsed_datetime = parse(data)
date_only = (
parsed_datetime.hour == 0 and
parsed_datetime.minute == 0 and
parsed_datetime.second == 0 and
parsed_datetime.tzinfo is None and
8 <= len(data) <= 10
)
if date_only:
raise ValueError("This is a date, not a datetime")
return parsed_datetime
elif klass == date:
if len(data) < 8:
raise ValueError("This is not a date")
return parse(data).date()
else:
converted_value = klass(data)
if isinstance(data, str) and klass == float:
if str(converted_value) != data:
# '7' -> 7.0 -> '7.0' != '7'
raise ValueError('This is not a float')
return converted_value
except (OverflowError, ValueError) as ex:
# parse can raise OverflowError
raise ApiValueError(
"{0}Failed to parse {1} as {2}".format(
additional_message, repr(data), klass.__name__
),
path_to_item=path_to_item
) from ex
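
# Editor's note: a small sketch of the date/datetime disambiguation above.
# A date-only string deserializes to `date`, while a full timestamp
# deserializes to `datetime`; the path tuples are illustrative.
def _example_deserialize_primitive():
    d = deserialize_primitive('2020-01-02', date, ('created_on',))
    dt = deserialize_primitive('2020-01-02T03:04:05Z', datetime, ('created_at',))
    return d, dt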
def get_discriminator_class(model_class,
discr_name,
discr_value, cls_visited):
"""Returns the child class specified by the discriminator.
Args:
model_class (OpenApiModel): the model class.
discr_name (string): the name of the discriminator property.
discr_value (any): the discriminator value.
cls_visited (list): list of model classes that have been visited.
Used to determine the discriminator class without
visiting circular references indefinitely.
Returns:
used_model_class (class/None): the chosen child class that will be used
to deserialize the data, for example dog.Dog.
If a class is not found, None is returned.
"""
if model_class in cls_visited:
# The class has already been visited and no suitable class was found.
return None
cls_visited.append(model_class)
used_model_class = None
if discr_name in model_class.discriminator:
class_name_to_discr_class = model_class.discriminator[discr_name]
used_model_class = class_name_to_discr_class.get(discr_value)
if used_model_class is None:
# We didn't find a discriminated class in class_name_to_discr_class.
# So look in the ancestor or descendant discriminators
# The discriminator mapping may exist in a descendant (anyOf, oneOf)
# or ancestor (allOf).
# Ancestor example: in the GrandparentAnimal -> ParentPet -> ChildCat
# hierarchy, the discriminator mappings may be defined at any level
# in the hierarchy.
# Descendant example: mammal -> whale/zebra/Pig -> BasquePig/DanishPig
# if we try to make BasquePig from mammal, we need to travel through
# the oneOf descendant discriminators to find BasquePig
descendant_classes = model_class._composed_schemas.get('oneOf', ()) + \
model_class._composed_schemas.get('anyOf', ())
ancestor_classes = model_class._composed_schemas.get('allOf', ())
possible_classes = descendant_classes + ancestor_classes
for cls in possible_classes:
# Check if the schema has inherited discriminators.
if hasattr(cls, 'discriminator') and cls.discriminator is not None:
used_model_class = get_discriminator_class(
cls, discr_name, discr_value, cls_visited)
if used_model_class is not None:
return used_model_class
return used_model_class
def deserialize_model(model_data, model_class, path_to_item, check_type,
configuration, spec_property_naming):
"""Deserializes model_data to model instance.
Args:
model_data (int/str/float/bool/none_type/list/dict): data to instantiate the model
model_class (OpenApiModel): the model class
path_to_item (list): path to the model in the received data
        check_type (bool): whether to check the data type for the values in
the model
configuration (Configuration): the instance to use to convert files
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
False if the variables names in the input data are python
variable names in PEP-8 snake case.
Returns:
model instance
Raise:
ApiTypeError
ApiValueError
ApiKeyError
"""
kw_args = dict(_check_type=check_type,
_path_to_item=path_to_item,
_configuration=configuration,
_spec_property_naming=spec_property_naming)
if issubclass(model_class, ModelSimple):
return model_class._new_from_openapi_data(model_data, **kw_args)
elif isinstance(model_data, list):
return model_class._new_from_openapi_data(*model_data, **kw_args)
    elif isinstance(model_data, dict):
kw_args.update(model_data)
return model_class._new_from_openapi_data(**kw_args)
elif isinstance(model_data, PRIMITIVE_TYPES):
return model_class._new_from_openapi_data(model_data, **kw_args)
def deserialize_file(response_data, configuration, content_disposition=None):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
Args:
param response_data (str): the file data to write
configuration (Configuration): the instance to use to convert files
Keyword Args:
content_disposition (str): the value of the Content-Disposition
header
Returns:
(file_type): the deserialized file which is open
The user is responsible for closing and reading the file
"""
fd, path = tempfile.mkstemp(dir=configuration.temp_folder_path)
os.close(fd)
os.remove(path)
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
if isinstance(response_data, str):
# change str to bytes so we can write it
response_data = response_data.encode('utf-8')
f.write(response_data)
f = open(path, "rb")
return f
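
# Editor's note: illustrative only. This mirrors the Content-Disposition
# filename extraction used in deserialize_file above; the header value is
# made up.
def _example_content_disposition_filename():
    header = 'attachment; filename="report.pdf"'
    return re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', header).group(1)
    # -> 'report.pdf'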
def attempt_convert_item(input_value, valid_classes, path_to_item,
configuration, spec_property_naming, key_type=False,
must_convert=False, check_type=True):
"""
Args:
input_value (any): the data to convert
valid_classes (any): the classes that are valid
path_to_item (list): the path to the item to convert
configuration (Configuration): the instance to use to convert files
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
False if the variables names in the input data are python
variable names in PEP-8 snake case.
key_type (bool): if True we need to convert a key type (not supported)
must_convert (bool): if True we must convert
check_type (bool): if True we check the type or the returned data in
ModelComposed/ModelNormal/ModelSimple instances
Returns:
instance (any) the fixed item
Raises:
ApiTypeError
ApiValueError
ApiKeyError
"""
valid_classes_ordered = order_response_types(valid_classes)
valid_classes_coercible = remove_uncoercible(
valid_classes_ordered, input_value, spec_property_naming)
if not valid_classes_coercible or key_type:
# we do not handle keytype errors, json will take care
# of this for us
if configuration is None or not configuration.discard_unknown_keys:
raise get_type_error(input_value, path_to_item, valid_classes,
key_type=key_type)
for valid_class in valid_classes_coercible:
try:
if issubclass(valid_class, OpenApiModel):
return deserialize_model(input_value, valid_class,
path_to_item, check_type,
configuration, spec_property_naming)
elif valid_class == file_type:
return deserialize_file(input_value, configuration)
return deserialize_primitive(input_value, valid_class,
path_to_item)
except (ApiTypeError, ApiValueError, ApiKeyError) as conversion_exc:
if must_convert:
raise conversion_exc
# if we have conversion errors when must_convert == False
# we ignore the exception and move on to the next class
continue
# we were unable to convert, must_convert == False
return input_value
def is_type_nullable(input_type):
"""
Returns true if None is an allowed value for the specified input_type.
    A type is nullable if at least one of the following conditions is true:
    1. The OAS 'nullable' attribute has been specified,
    2. The type is the 'null' type,
    3. The type is an anyOf/oneOf composed schema, and a child schema is
       the 'null' type.
Args:
input_type (type): the class of the input_value that we are
checking
Returns:
bool
"""
if input_type is none_type:
return True
if issubclass(input_type, OpenApiModel) and input_type._nullable:
return True
if issubclass(input_type, ModelComposed):
# If oneOf/anyOf, check if the 'null' type is one of the allowed types.
for t in input_type._composed_schemas.get('oneOf', ()):
if is_type_nullable(t): return True
for t in input_type._composed_schemas.get('anyOf', ()):
if is_type_nullable(t): return True
return False
def is_valid_type(input_class_simple, valid_classes):
"""
Args:
input_class_simple (class): the class of the input_value that we are
checking
valid_classes (tuple): the valid classes that the current item
should be
Returns:
bool
"""
valid_type = input_class_simple in valid_classes
if not valid_type and (
issubclass(input_class_simple, OpenApiModel) or
input_class_simple is none_type):
for valid_class in valid_classes:
if input_class_simple is none_type and is_type_nullable(valid_class):
# Schema is oneOf/anyOf and the 'null' type is one of the allowed types.
return True
if not (issubclass(valid_class, OpenApiModel) and valid_class.discriminator):
continue
discr_propertyname_py = list(valid_class.discriminator.keys())[0]
discriminator_classes = (
valid_class.discriminator[discr_propertyname_py].values()
)
valid_type = is_valid_type(input_class_simple, discriminator_classes)
if valid_type:
return True
return valid_type
def validate_and_convert_types(input_value, required_types_mixed, path_to_item,
spec_property_naming, _check_type, configuration=None):
"""Raises a TypeError is there is a problem, otherwise returns value
Args:
input_value (any): the data to validate/convert
required_types_mixed (list/dict/tuple): A list of
valid classes, or a list tuples of valid classes, or a dict where
the value is a tuple of value classes
path_to_item: (list) the path to the data being validated
this stores a list of keys or indices to get to the data being
validated
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
False if the variables names in the input data are python
variable names in PEP-8 snake case.
_check_type: (boolean) if true, type will be checked and conversion
will be attempted.
configuration: (Configuration): the configuration class to use
when converting file_type items.
If passed, conversion will be attempted when possible
If not passed, no conversions will be attempted and
exceptions will be raised
Returns:
the correctly typed value
Raises:
ApiTypeError
"""
results = get_required_type_classes(required_types_mixed, spec_property_naming)
valid_classes, child_req_types_by_current_type = results
input_class_simple = get_simple_class(input_value)
valid_type = is_valid_type(input_class_simple, valid_classes)
if not valid_type:
if configuration:
# if input_value is not valid_type try to convert it
converted_instance = attempt_convert_item(
input_value,
valid_classes,
path_to_item,
configuration,
spec_property_naming,
key_type=False,
must_convert=True,
check_type=_check_type
)
return converted_instance
else:
raise get_type_error(input_value, path_to_item, valid_classes,
key_type=False)
# input_value's type is in valid_classes
if len(valid_classes) > 1 and configuration:
# there are valid classes which are not the current class
valid_classes_coercible = remove_uncoercible(
valid_classes, input_value, spec_property_naming, must_convert=False)
if valid_classes_coercible:
converted_instance = attempt_convert_item(
input_value,
valid_classes_coercible,
path_to_item,
configuration,
spec_property_naming,
key_type=False,
must_convert=False,
check_type=_check_type
)
return converted_instance
if child_req_types_by_current_type == {}:
# all types are of the required types and there are no more inner
# variables left to look at
return input_value
inner_required_types = child_req_types_by_current_type.get(
type(input_value)
)
if inner_required_types is None:
# for this type, there are not more inner variables left to look at
return input_value
if isinstance(input_value, list):
if input_value == []:
# allow an empty list
return input_value
for index, inner_value in enumerate(input_value):
inner_path = list(path_to_item)
inner_path.append(index)
input_value[index] = validate_and_convert_types(
inner_value,
inner_required_types,
inner_path,
spec_property_naming,
_check_type,
configuration=configuration
)
elif isinstance(input_value, dict):
if input_value == {}:
# allow an empty dict
return input_value
for inner_key, inner_val in input_value.items():
inner_path = list(path_to_item)
inner_path.append(inner_key)
if get_simple_class(inner_key) != str:
raise get_type_error(inner_key, inner_path, valid_classes,
key_type=True)
input_value[inner_key] = validate_and_convert_types(
inner_val,
inner_required_types,
inner_path,
spec_property_naming,
_check_type,
configuration=configuration
)
return input_value
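
# Editor's note: a hedged sketch. For a required type of "list of str",
# required_types_mixed carries a list instance describing the child type;
# the call validates the container, then each element, in place.
def _example_validate_list_of_str():
    value = ['a', 'b']
    return validate_and_convert_types(
        value, ([str],), ['tags'], True, True, configuration=None)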
def model_to_dict(model_instance, serialize=True):
"""Returns the model properties as a dict
Args:
model_instance (one of your model instances): the model instance that
will be converted to a dict.
Keyword Args:
serialize (bool): if True, the keys in the dict will be values from
attribute_map
"""
result = {}
model_instances = [model_instance]
if model_instance._composed_schemas:
model_instances.extend(model_instance._composed_instances)
seen_json_attribute_names = set()
used_fallback_python_attribute_names = set()
py_to_json_map = {}
for model_instance in model_instances:
for attr, value in model_instance._data_store.items():
if serialize:
# we use get here because additional property key names do not
# exist in attribute_map
try:
attr = model_instance.attribute_map[attr]
py_to_json_map.update(model_instance.attribute_map)
seen_json_attribute_names.add(attr)
except KeyError:
used_fallback_python_attribute_names.add(attr)
if isinstance(value, list):
if not value:
# empty list or None
result[attr] = value
else:
res = []
for v in value:
if isinstance(v, PRIMITIVE_TYPES) or v is None:
res.append(v)
elif isinstance(v, ModelSimple):
res.append(v.value)
else:
res.append(model_to_dict(v, serialize=serialize))
result[attr] = res
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0],
model_to_dict(item[1], serialize=serialize))
if hasattr(item[1], '_data_store') else item,
value.items()
))
elif isinstance(value, ModelSimple):
result[attr] = value.value
elif hasattr(value, '_data_store'):
result[attr] = model_to_dict(value, serialize=serialize)
else:
result[attr] = value
if serialize:
for python_key in used_fallback_python_attribute_names:
json_key = py_to_json_map.get(python_key)
if json_key is None:
continue
if python_key == json_key:
continue
json_key_assigned_no_need_for_python_key = json_key in seen_json_attribute_names
if json_key_assigned_no_need_for_python_key:
del result[python_key]
return result
def type_error_message(var_value=None, var_name=None, valid_classes=None,
key_type=None):
"""
Keyword Args:
var_value (any): the variable which has the type_error
        var_name (str): the name of the variable which has the type error
valid_classes (tuple): the accepted classes for current_item's
value
key_type (bool): False if our value is a value in a dict
True if it is a key in a dict
False if our item is an item in a list
"""
key_or_value = 'value'
if key_type:
key_or_value = 'key'
valid_classes_phrase = get_valid_classes_phrase(valid_classes)
msg = (
"Invalid type for variable '{0}'. Required {1} type {2} and "
"passed type was {3}".format(
var_name,
key_or_value,
valid_classes_phrase,
type(var_value).__name__,
)
)
return msg
def get_valid_classes_phrase(input_classes):
"""Returns a string phrase describing what types are allowed
"""
all_classes = list(input_classes)
all_classes = sorted(all_classes, key=lambda cls: cls.__name__)
all_class_names = [cls.__name__ for cls in all_classes]
if len(all_class_names) == 1:
return 'is {0}'.format(all_class_names[0])
return "is one of [{0}]".format(", ".join(all_class_names))
def get_allof_instances(self, model_args, constant_args):
"""
Args:
self: the class we are handling
model_args (dict): var_name to var_value
used to make instances
constant_args (dict):
metadata arguments:
_check_type
_path_to_item
_spec_property_naming
_configuration
_visited_composed_classes
Returns
composed_instances (list)
"""
composed_instances = []
for allof_class in self._composed_schemas['allOf']:
try:
allof_instance = allof_class(**model_args, **constant_args)
composed_instances.append(allof_instance)
except Exception as ex:
raise ApiValueError(
"Invalid inputs given to generate an instance of '%s'. The "
"input data was invalid for the allOf schema '%s' in the composed "
"schema '%s'. Error=%s" % (
allof_class.__name__,
allof_class.__name__,
self.__class__.__name__,
str(ex)
)
) from ex
return composed_instances
def get_oneof_instance(cls, model_kwargs, constant_kwargs, model_arg=None):
"""
Find the oneOf schema that matches the input data (e.g. payload).
If exactly one schema matches the input data, an instance of that schema
is returned.
If zero or more than one schema match the input data, an exception is raised.
In OAS 3.x, the payload MUST, by validation, match exactly one of the
schemas described by oneOf.
Args:
cls: the class we are handling
model_kwargs (dict): var_name to var_value
The input data, e.g. the payload that must match a oneOf schema
in the OpenAPI document.
constant_kwargs (dict): var_name to var_value
args that every model requires, including configuration, server
and path to item.
Kwargs:
model_arg: (int, float, bool, str, date, datetime, ModelSimple, None):
the value to assign to a primitive class or ModelSimple class
Notes:
- this is only passed in when oneOf includes types which are not object
- None is used to suppress handling of model_arg, nullable models are handled in __new__
Returns
oneof_instance (instance)
"""
if len(cls._composed_schemas['oneOf']) == 0:
return None
oneof_instances = []
# Iterate over each oneOf schema and determine if the input data
# matches the oneOf schemas.
for oneof_class in cls._composed_schemas['oneOf']:
# The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is an OAS >= 3.1 feature.
if oneof_class is none_type:
# skip none_types because we are deserializing dict data.
# none_type deserialization is handled in the __new__ method
continue
single_value_input = allows_single_value_input(oneof_class)
try:
if not single_value_input:
oneof_instance = oneof_class(**model_kwargs, **constant_kwargs)
else:
if issubclass(oneof_class, ModelSimple):
oneof_instance = oneof_class(model_arg, **constant_kwargs)
elif oneof_class in PRIMITIVE_TYPES:
oneof_instance = validate_and_convert_types(
model_arg,
(oneof_class,),
constant_kwargs['_path_to_item'],
constant_kwargs['_spec_property_naming'],
constant_kwargs['_check_type'],
configuration=constant_kwargs['_configuration']
)
oneof_instances.append(oneof_instance)
except Exception:
pass
if len(oneof_instances) == 0:
raise ApiValueError(
"Invalid inputs given to generate an instance of %s. None "
"of the oneOf schemas matched the input data." %
cls.__name__
)
elif len(oneof_instances) > 1:
raise ApiValueError(
"Invalid inputs given to generate an instance of %s. Multiple "
"oneOf schemas matched the inputs, but a max of one is allowed." %
cls.__name__
)
return oneof_instances[0]
def get_anyof_instances(self, model_args, constant_args):
"""
Args:
self: the class we are handling
model_args (dict): var_name to var_value
The input data, e.g. the payload that must match at least one
anyOf child schema in the OpenAPI document.
constant_args (dict): var_name to var_value
args that every model requires, including configuration, server
and path to item.
Returns
anyof_instances (list)
"""
anyof_instances = []
if len(self._composed_schemas['anyOf']) == 0:
return anyof_instances
for anyof_class in self._composed_schemas['anyOf']:
        # The composed anyOf schema allows the 'null' type and the input data
        # is the null value. This is an OAS >= 3.1 feature.
if anyof_class is none_type:
# skip none_types because we are deserializing dict data.
# none_type deserialization is handled in the __new__ method
continue
try:
anyof_instance = anyof_class(**model_args, **constant_args)
anyof_instances.append(anyof_instance)
except Exception:
pass
if len(anyof_instances) == 0:
raise ApiValueError(
"Invalid inputs given to generate an instance of %s. None of the "
"anyOf schemas matched the inputs." %
self.__class__.__name__
)
return anyof_instances
def get_discarded_args(self, composed_instances, model_args):
"""
Gathers the args that were discarded by configuration.discard_unknown_keys
"""
model_arg_keys = model_args.keys()
discarded_args = set()
# arguments passed to self were already converted to python names
# before __init__ was called
for instance in composed_instances:
if instance.__class__ in self._composed_schemas['allOf']:
try:
keys = instance.to_dict().keys()
                discarded_keys = model_arg_keys - keys
discarded_args.update(discarded_keys)
except Exception:
# allOf integer schema will throw exception
pass
else:
try:
all_keys = set(model_to_dict(instance, serialize=False).keys())
js_keys = model_to_dict(instance, serialize=True).keys()
all_keys.update(js_keys)
discarded_keys = model_arg_keys - all_keys
discarded_args.update(discarded_keys)
except Exception:
# allOf integer schema will throw exception
pass
return discarded_args
def validate_get_composed_info(constant_args, model_args, self):
"""
For composed schemas, generate schema instances for
all schemas in the oneOf/anyOf/allOf definition. If additional
properties are allowed, also assign those properties on
all matched schemas that contain additionalProperties.
Openapi schemas are python classes.
Exceptions are raised if:
- 0 or > 1 oneOf schema matches the model_args input data
- no anyOf schema matches the model_args input data
- any of the allOf schemas do not match the model_args input data
Args:
constant_args (dict): these are the args that every model requires
model_args (dict): these are the required and optional spec args that
were passed in to make this model
self (class): the class that we are instantiating
This class contains self._composed_schemas
Returns:
composed_info (list): length three
composed_instances (list): the composed instances which are not
self
var_name_to_model_instances (dict): a dict going from var_name
to the model_instance which holds that var_name
the model_instance may be self or an instance of one of the
classes in self.composed_instances()
additional_properties_model_instances (list): a list of the
model instances which have the property
additional_properties_type. This list can include self
"""
# create composed_instances
composed_instances = []
allof_instances = get_allof_instances(self, model_args, constant_args)
composed_instances.extend(allof_instances)
oneof_instance = get_oneof_instance(self.__class__, model_args, constant_args)
if oneof_instance is not None:
composed_instances.append(oneof_instance)
anyof_instances = get_anyof_instances(self, model_args, constant_args)
composed_instances.extend(anyof_instances)
"""
set additional_properties_model_instances
additional properties must be evaluated at the schema level
so self's additional properties are most important
If self is a composed schema with:
- no properties defined in self
- additionalProperties: False
Then for object payloads every property is an additional property
and they are not allowed, so only empty dict is allowed
Properties must be set on all matching schemas
    so when a property is assigned to a composed instance, it must be set on all
composed instances regardless of additionalProperties presence
keeping it to prevent breaking changes in v5.0.1
TODO remove cls._additional_properties_model_instances in 6.0.0
"""
additional_properties_model_instances = []
if self.additional_properties_type is not None:
additional_properties_model_instances = [self]
"""
no need to set properties on self in here, they will be set in __init__
By here all composed schema oneOf/anyOf/allOf instances have their properties set using
model_args
"""
discarded_args = get_discarded_args(self, composed_instances, model_args)
# map variable names to composed_instances
var_name_to_model_instances = {}
for prop_name in model_args:
if prop_name not in discarded_args:
var_name_to_model_instances[prop_name] = [self] + composed_instances
return [
composed_instances,
var_name_to_model_instances,
additional_properties_model_instances,
discarded_args
]
|
get_simple_class
|
test_watcher.py
|
import tempfile
import time
from contextlib import contextmanager
from os import path
from shutil import rmtree
import pytest
from galaxy.tool_util.toolbox import watcher
from galaxy.util import bunch
@pytest.mark.skipif(not watcher.can_watch, reason="watchdog not available")
def test_watcher():
with __test_directory() as t:
tool_path = path.join(t, "test.xml")
toolbox = Toolbox()
with open(tool_path, "w") as f:
f.write("a")
tool_watcher = watcher.get_tool_watcher(toolbox, bunch.Bunch(watch_tools=True))
tool_watcher.start()
tool_watcher.watch_file(tool_path, "cool_tool")
time.sleep(2)
assert not toolbox.was_reloaded("cool_tool")
with open(tool_path, "w") as f:
f.write("b")
wait_for_reload(lambda: toolbox.was_reloaded("cool_tool"))
tool_watcher.shutdown()
assert tool_watcher.observer is None
@pytest.mark.skipif(not watcher.can_watch, reason="watchdog not available")
def test_tool_conf_watcher():
callback = CallbackRecorder()
conf_watcher = watcher.get_tool_conf_watcher(callback.call)
conf_watcher.start()
with __test_directory() as t:
tool_conf_path = path.join(t, "test_conf.xml")
with open(tool_conf_path, "w") as f:
f.write("a")
conf_watcher.watch_file(tool_conf_path)
time.sleep(2)
with open(tool_conf_path, "w") as f:
f.write("b")
wait_for_reload(lambda: callback.called)
conf_watcher.shutdown()
assert conf_watcher.thread is None
def wait_for_reload(check):
reloaded = False
for _ in range(10):
reloaded = check()
if reloaded:
break
time.sleep(0.2)
assert reloaded
class Toolbox:
def __init__(self):
self.reloaded = {}
def reload_tool_by_id(self, tool_id):
self.reloaded[tool_id] = True
def was_reloaded(self, tool_id):
|
return self.reloaded.get(tool_id, False)
class CallbackRecorder:
def __init__(self):
self.called = False
def call(self):
self.called = True
@contextmanager
def __test_directory():
base_path = tempfile.mkdtemp()
try:
yield base_path
finally:
rmtree(base_path)
| |
classinstance1.py
|
class Leapx_org():
def __init__(self,first,last,pay):
self.f_name = first
self.l_name = last
self.pay_amt = pay
self.full_name = first+" "+last
def make_email(self):
return self.f_name+ "."+self.l_name+"@xyz.com"
def incrementpay(self):
|
L_obj1 = Leapx_org('mohit', 'RAJ', 60000)
L_obj2 = Leapx_org('Ravender', 'Dahiya', 70000)
print(L_obj1.pay_amt)
print(L_obj1.incrementpay())
|
self.pay_amt = int(self.pay_amt*1.20)
return self.pay_amt
|
scriptMensal.js
|
function ir() {
var acertos = Number(document.getElementById('acertos').value)
var informaçao = document.getElementById('informaçao')
var meta = 360
var meta2 = Math.round(100 - ((acertos * 100) / meta))
var meta3 = Math.round(((acertos * 100) / meta))
var resto = Math.round(((80 - meta3) / 100) * 360)
if (meta3 < 80) {
document.getElementById('barra').style = `width: 70%;height: 20px;border-radius:10px;border: thin solid #2d2a3b; margin: auto; background: linear-gradient(to right, #558077 0%, #558077 ${meta3}%, #fbc7d4 0%)`
document.getElementById('shadow').style = 'height: 410px'
document.getElementById('voltar').style = ' margin-top: 5px;'
informaçao.innerHTML = ''
informaçao.innerHTML +=
`<section>
<div id="barra">${meta3}%</div>
</section>
|
document.getElementById('shadow').style = 'height: 360px'
document.getElementById('voltar').style = ' margin-top: 5px;'
informaçao.innerHTML = ''
informaçao.innerHTML +=
` <section>
<div id="barra">${meta3}%</div>
</section>
<div id="mensagem"><p>Parabéns.</br> Sua meta de 80% <strong> foi alcançada</strong> com um acerto de ${meta3}%</p></div>`
}
}
|
<div id="mensagem"><p>Em um total de 100%, você errou ${meta2}% das 360 questões.</br></br> Seu progresso foi de ${meta3}%, porém a sua meta é de 80%. Sendo assim, a sua meta <strong>não foi alcançada</strong>. Dessa forma, para atingir o seu objetivo, você precisaria de mais ${80-meta3}%, ou seja , acertar mais ${resto} questões.</p></div>`
} else {
document.getElementById('barra').style = `width: 70%;height: 20px;border-radius:10px;border: thin solid #2d2a3b; margin: auto; background: linear-gradient(to right, #558077 0%, #558077 ${meta3}%, #fbc7d4 0%)`
|
model.py
|
# Copyright (c) 2015 Walt Chen
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
class ParserException(Exception):
pass
class Stock(object):
    # yesterday_close: yesterday's closing price
    # close: today's closing price
    # volume: units of stock transacted
    # turnover: total transaction amount
def __init__(self, code, date, open, high, low, close, volume):
self.code = code
self.date = str(date)
self.open = open
self.high = high
self.low = low
self.close = close
self.volume = volume
def
|
(self):
return "%s\tvol: %s\topen: %s\tHI: %s\t LO: %s\tclose: %s" %\
(self.date, self.volume, self.open, self.high, self.low, self.close)
__all__ = ['ParserException', 'Stock']
|
__str__
|
test_get_current_branch.rs
|
use std::convert::TryInto;
use std::{
convert::TryFrom,
time::{Duration, SystemTime},
};
use crypto::hash::ChainId;
use shell_automaton::config::default_test_config;
use shell_automaton::service::storage_service::{StorageError, StorageResponseError};
use shell_automaton::{
current_head::CurrentHeadState,
event::WakeupEvent,
service::storage_service::{StorageRequestPayload, StorageResponseSuccess},
shell_compatibility_version::ShellCompatibilityVersion,
};
use shell_automaton::{Config, Service, State};
use shell_automaton_testing::service::IOCondition;
use shell_automaton_testing::{build_expected_history, generate_chain};
use shell_automaton_testing::{one_real_node_cluster::Cluster, service::StorageResponse};
use storage::BlockHeaderWithHash;
use tezos_identity::Identity;
use tezos_messages::p2p::encoding::block_header::Level;
use tezos_messages::p2p::encoding::peer::PeerMessage;
use tezos_messages::p2p::encoding::prelude::{BlockLocator, GetCurrentBranchMessage};
fn data(current_head_level: Level) -> (Cluster, Vec<BlockHeaderWithHash>) {
let initial_time = SystemTime::now();
let mut state = State::new(Config {
initial_time,
pow_target: 0.0,
identity: Identity::generate(0.0).unwrap(),
shell_compatibility_version: ShellCompatibilityVersion::new(
"TEZOS_LOCALNET".to_owned(),
vec![1],
vec![1],
),
chain_id: ChainId::try_from("NetXz969SFaFn8k").unwrap(), // granada
check_timeouts_interval: Duration::from_millis(500),
peer_connecting_timeout: Duration::from_millis(2000),
peer_handshaking_timeout: Duration::from_secs(8),
peers_potential_max: 2,
peers_connected_max: 2,
peers_graylist_disable: false,
peers_graylist_timeout: Duration::from_secs(15 * 60),
..default_test_config()
});
let genesis_header = state
.config
.protocol_runner
.environment
.genesis_header(
"CoV8SQumiVU9saiu3FVNeDNewJaJH8yWdsGF3WLdsRr2P9S7MzCj"
.try_into()
.unwrap(),
"LLoZS2LW3rEi7KYU4ouBQtorua37aWWCtpDmv1n2x3xoKi6sVXLWp"
.try_into()
.unwrap(),
)
.unwrap();
let genesis_block = BlockHeaderWithHash {
hash: state
.config
.init_storage_data
.genesis_block_header_hash
.clone(),
header: genesis_header.into(),
};
|
let chain = generate_chain(genesis_block, current_head_level);
let head = chain.last().unwrap().clone();
let head_pred = chain.iter().rev().nth(1).cloned();
state.current_head = CurrentHeadState::rehydrated(head, head_pred);
(Cluster::new(state, initial_time), chain)
}
/// Test GetCurrentBranch response from the node.
///
/// # Parameters:
/// - `current_head_level`: configures the current head level of the node.
/// - `missing_below_level`: the level at or below which we return `None`
///   for storage requests, simulating the case where a block header is
///   not available in storage.
/// - `error_below_level`: the level at or below which we return an error
///   for storage requests.
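/// For example (illustrative), `test(10, Some(3), None)` runs a node whose
/// head is at level 10 and answers storage lookups at or below level 3 with
/// `None`, truncating the returned history at that level.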
fn test(
current_head_level: Level,
missing_below_level: Option<Level>,
error_below_level: Option<Level>,
) {
let (mut cluster, chain) = data(current_head_level);
let peer_id = cluster.peer_init(0.0);
cluster.connect_to_peer(peer_id);
cluster.set_peer_connected(peer_id);
cluster.do_handshake(peer_id).unwrap();
let chain_id = cluster.state().config.chain_id.clone();
let node_pkh = cluster.state().config.identity.peer_id();
let peer_pkh = cluster.peer(peer_id).identity().peer_id();
let expected_current_head = cluster.state().current_head.get().unwrap().clone();
let expected_history = build_expected_history(&chain, node_pkh, peer_pkh, current_head_level);
let peer = cluster.peer(peer_id);
peer.send_peer_message(PeerMessage::GetCurrentBranch(GetCurrentBranchMessage::new(
chain_id,
)));
peer.set_read_cond(IOCondition::NoLimit)
.set_write_cond(IOCondition::NoLimit);
cluster.dispatch_peer_ready_event(peer_id, true, true, false);
eprintln!("Expected CurrentHead: {:?}", expected_current_head);
eprintln!("Expected History:");
for block in expected_history.iter() {
eprintln!("({} - {})", block.header.level(), block.hash);
}
eprintln!("----------------\n");
let mut was_min_level_reached = false;
for block in expected_history.iter() {
if was_min_level_reached {
break;
}
let service = cluster.service().storage();
loop {
let req = service
.requests
.pop_front()
.expect("Expected storage request from state machine");
match req.payload {
StorageRequestPayload::BlockHashByLevelGet(level) => {
assert_eq!(
level,
block.header.level(),
"State machine requested invalid level for current branch!"
);
let result = if missing_below_level.filter(|l| level <= *l).is_some() {
was_min_level_reached = true;
Ok(StorageResponseSuccess::BlockHashByLevelGetSuccess(None))
} else if error_below_level.filter(|l| level <= *l).is_some() {
was_min_level_reached = true;
Err(StorageResponseError::BlockHashByLevelGetError(
StorageError::mocked(),
))
} else {
Ok(StorageResponseSuccess::BlockHashByLevelGetSuccess(Some(
block.hash.clone(),
)))
};
service.responses.push_back(StorageResponse {
req_id: req.id,
result,
});
cluster.dispatch(WakeupEvent);
break;
}
_ => continue,
}
}
}
eprintln!("State: {:?}", cluster.state());
let current_branch = loop {
let msg = cluster
.peer(peer_id)
.read_peer_message()
.expect("Expected CurrentBranch response");
match msg {
PeerMessage::CurrentBranch(msg) => break msg.current_branch().clone(),
_ => {
cluster.dispatch_peer_ready_event(peer_id, true, true, false);
continue;
}
};
};
let expected_current_branch = {
let min_level = missing_below_level.or(error_below_level).unwrap_or(-1);
let history = expected_history
.into_iter()
.filter(|b| b.header.level() > min_level)
.map(|b| b.hash)
.collect();
BlockLocator::new((*expected_current_head.header).clone(), history)
};
assert_eq!(current_branch, expected_current_branch);
}
#[test]
fn test_get_current_branch_current_head_0() {
test(0, None, None);
}
/// Should always succeed as storage shouldn't even be touched when
/// current head is 0.
#[test]
fn test_get_current_branch_current_head_0_error() {
test(0, None, Some(0));
}
/// Should always succeed as storage shouldn't even be touched when
/// current head is 0.
#[test]
fn test_get_current_branch_current_head_0_missing() {
test(0, Some(0), None);
}
#[test]
fn test_get_current_branch_current_head_1_to_100() {
for i in 1..=100 {
test(i, None, None);
}
}
#[test]
fn test_get_current_branch_current_head_1_to_100_error() {
for i in 1..=10 {
for j in 0..=i {
test(i, None, Some(j));
}
}
}
#[test]
fn test_get_current_branch_current_head_1_to_100_missing() {
for i in 1..=10 {
for j in 0..=i {
test(i, Some(j), None);
}
}
}
| |
clientconductor.go
|
/*
Copyright 2016-2018 Stanislav Liberman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aeron
import (
"errors"
"fmt"
"log"
"runtime"
"sync"
"time"
"github.com/lirm/aeron-go/aeron/atomic"
"github.com/lirm/aeron-go/aeron/broadcast"
ctr "github.com/lirm/aeron-go/aeron/counters"
"github.com/lirm/aeron-go/aeron/driver"
"github.com/lirm/aeron-go/aeron/idlestrategy"
"github.com/lirm/aeron-go/aeron/logbuffer"
logging "github.com/op/go-logging"
)
var RegistrationStatus = struct {
AwaitingMediaDriver int
RegisteredMediaDriver int
ErroredMediaDriver int
}{
0,
1,
2,
}
const (
keepaliveTimeoutNS = 500 * int64(time.Millisecond)
resourceTimeoutNS = 1000 * int64(time.Millisecond)
)
type publicationStateDefn struct {
regID int64
origRegID int64
timeOfRegistration int64
streamID int32
sessionID int32
posLimitCounterID int32
channelStatusIndicatorID int32
errorCode int32
status int
channel string
errorMessage string
buffers *logbuffer.LogBuffers
publication *Publication
}
func (pub *publicationStateDefn) Init(channel string, regID int64, streamID int32, now int64) *publicationStateDefn {
pub.channel = channel
pub.regID = regID
pub.streamID = streamID
pub.sessionID = -1
pub.posLimitCounterID = -1
pub.timeOfRegistration = now
pub.status = RegistrationStatus.AwaitingMediaDriver
return pub
}
type subscriptionStateDefn struct {
regID int64
timeOfRegistration int64
streamID int32
errorCode int32
status int
channel string
errorMessage string
subscription *Subscription
}
func (sub *subscriptionStateDefn) Init(ch string, regID int64, sID int32, now int64) *subscriptionStateDefn {
sub.channel = ch
sub.regID = regID
sub.streamID = sID
sub.timeOfRegistration = now
sub.status = RegistrationStatus.AwaitingMediaDriver
return sub
}
type lingerResource struct {
lastTime int64
resource *Image
}
type ClientConductor struct {
pubs []*publicationStateDefn
subs []*subscriptionStateDefn
driverProxy *driver.Proxy
counterValuesBuffer *atomic.Buffer
counterReader *ctr.Reader
driverListenerAdapter *driver.ListenerAdapter
adminLock sync.Mutex
pendingCloses map[int64]chan bool
lingeringResources chan lingerResource
onNewPublicationHandler NewPublicationHandler
onNewSubscriptionHandler NewSubscriptionHandler
onAvailableImageHandler AvailableImageHandler
onUnavailableImageHandler UnavailableImageHandler
errorHandler func(error)
running atomic.Bool
conductorRunning atomic.Bool
driverActive atomic.Bool
timeOfLastKeepalive int64
timeOfLastCheckManagedResources int64
timeOfLastDoWork int64
driverTimeoutNs int64
interServiceTimeoutNs int64
publicationConnectionTimeoutNs int64
resourceLingerTimeoutNs int64
}
// Init is the primary initialization method for ClientConductor
func (cc *ClientConductor) Init(driverProxy *driver.Proxy, bcast *broadcast.CopyReceiver,
interServiceTo, driverTo, pubConnectionTo, lingerTo time.Duration, counters *ctr.MetaDataFlyweight) *ClientConductor {
logger.Debugf("Initializing ClientConductor with: %v %v %d %d %d", driverProxy, bcast, interServiceTo,
driverTo, pubConnectionTo)
cc.driverProxy = driverProxy
cc.running.Set(true)
cc.driverActive.Set(true)
cc.driverListenerAdapter = driver.NewAdapter(cc, bcast)
cc.interServiceTimeoutNs = interServiceTo.Nanoseconds()
cc.driverTimeoutNs = driverTo.Nanoseconds()
cc.publicationConnectionTimeoutNs = pubConnectionTo.Nanoseconds()
cc.resourceLingerTimeoutNs = lingerTo.Nanoseconds()
cc.counterValuesBuffer = counters.ValuesBuf.Get()
cc.counterReader = ctr.NewReader(counters.ValuesBuf.Get(), counters.MetaDataBuf.Get())
cc.pendingCloses = make(map[int64]chan bool)
cc.lingeringResources = make(chan lingerResource, 1024)
cc.pubs = make([]*publicationStateDefn, 0)
cc.subs = make([]*subscriptionStateDefn, 0)
return cc
}
// Close will terminate the Run() goroutine body and close all active publications and subscriptions. Run() can
// be restarted in another goroutine.
func (cc *ClientConductor) Close() error {
logger.Debugf("Closing ClientConductor")
var err error
if cc.running.CompareAndSet(true, false) {
for _, pub := range cc.pubs {
if pub.publication != nil {
err = pub.publication.Close()
if err != nil {
cc.errorHandler(err)
}
}
}
for _, sub := range cc.subs {
if sub.subscription != nil {
err = sub.subscription.Close()
if err != nil {
cc.errorHandler(err)
}
}
}
}
timeoutDuration := 5 * time.Second
timeout := time.Now().Add(timeoutDuration)
for cc.conductorRunning.Get() && time.Now().Before(timeout) {
time.Sleep(10 * time.Millisecond)
}
if cc.conductorRunning.Get() {
msg := fmt.Sprintf("failed to stop conductor after %v", timeoutDuration)
logger.Warning(msg)
err = errors.New(msg)
}
logger.Debugf("Closed ClientConductor")
return err
}
// Start begins the main execution loop of ClientConductor on a goroutine.
func (cc *ClientConductor) Start(idleStrategy idlestrategy.Idler) {
cc.running.Set(true)
go cc.run(idleStrategy)
}
// run is the main execution loop of ClientConductor.
func (cc *ClientConductor) run(idleStrategy idlestrategy.Idler) {
now := time.Now().UnixNano()
cc.timeOfLastKeepalive = now
cc.timeOfLastCheckManagedResources = now
cc.timeOfLastDoWork = now
// Stay on the same thread for performance
runtime.LockOSThread()
// Clean exit from this particular go routine
defer func() {
if err := recover(); err != nil {
errStr := fmt.Sprintf("Panic: %v", err)
logger.Error(errStr)
cc.errorHandler(errors.New(errStr))
cc.running.Set(false)
}
cc.conductorRunning.Set(false)
logger.Infof("ClientConductor done")
}()
cc.conductorRunning.Set(true)
for cc.running.Get() {
workCount := cc.driverListenerAdapter.ReceiveMessages()
workCount += cc.onHeartbeatCheckTimeouts()
idleStrategy.Idle(workCount)
}
}
func (cc *ClientConductor) verifyDriverIsActive() {
if !cc.driverActive.Get() {
log.Fatal("Driver is not active")
}
}
// AddPublication sends the add publication command through the driver proxy
func (cc *ClientConductor) AddPublication(channel string, streamID int32) int64 {
logger.Debugf("AddPublication: channel=%s, streamId=%d", channel, streamID)
cc.verifyDriverIsActive()
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
for _, pub := range cc.pubs {
if pub.streamID == streamID && pub.channel == channel {
return pub.regID
}
}
now := time.Now().UnixNano()
regID := cc.driverProxy.AddPublication(channel, streamID)
pubState := new(publicationStateDefn)
pubState.Init(channel, regID, streamID, now)
cc.pubs = append(cc.pubs, pubState)
return regID
}
// AddExclusivePublication sends the add publication command through the driver proxy
func (cc *ClientConductor) AddExclusivePublication(channel string, streamID int32) int64 {
logger.Debugf("AddExclusivePublication: channel=%s, streamId=%d", channel, streamID)
cc.verifyDriverIsActive()
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
for _, pub := range cc.pubs {
if pub.streamID == streamID && pub.channel == channel {
return pub.regID
}
}
now := time.Now().UnixNano()
regID := cc.driverProxy.AddExclusivePublication(channel, streamID)
pubState := new(publicationStateDefn)
pubState.Init(channel, regID, streamID, now)
cc.pubs = append(cc.pubs, pubState)
return regID
}
func (cc *ClientConductor) FindPublication(regID int64) *Publication {
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
var publication *Publication
for _, pub := range cc.pubs {
if pub.regID == regID {
if pub.publication != nil {
publication = pub.publication
} else {
switch pub.status {
case RegistrationStatus.AwaitingMediaDriver:
waitForMediaDriver(pub.timeOfRegistration, cc)
case RegistrationStatus.RegisteredMediaDriver:
publication = NewPublication(pub.buffers)
publication.conductor = cc
publication.channel = pub.channel
publication.regID = regID
publication.originalRegID = pub.origRegID
publication.streamID = pub.streamID
publication.sessionID = pub.sessionID
publication.pubLimit = NewPosition(cc.counterValuesBuffer, pub.posLimitCounterID)
publication.channelStatusIndicatorID = pub.channelStatusIndicatorID
case RegistrationStatus.ErroredMediaDriver:
log.Fatalf("Error on %d: %d: %s", regID, pub.errorCode, pub.errorMessage)
}
}
break
}
}
return publication
}
func (cc *ClientConductor) releasePublication(regID int64) {
logger.Debugf("ReleasePublication: regID=%d", regID)
cc.verifyDriverIsActive()
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
pubcnt := len(cc.pubs)
for i, pub := range cc.pubs {
if pub != nil && pub.regID == regID {
cc.driverProxy.RemovePublication(regID)
cc.pubs[i] = cc.pubs[pubcnt-1]
cc.pubs[pubcnt-1] = nil
pubcnt--
}
}
cc.pubs = cc.pubs[:pubcnt]
}
// AddSubscription sends the add subscription command through the driver proxy
func (cc *ClientConductor) AddSubscription(channel string, streamID int32) int64 {
logger.Debugf("AddSubscription: channel=%s, streamId=%d", channel, streamID)
cc.verifyDriverIsActive()
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
now := time.Now().UnixNano()
regID := cc.driverProxy.AddSubscription(channel, streamID)
subState := new(subscriptionStateDefn)
subState.Init(channel, regID, streamID, now)
cc.subs = append(cc.subs, subState)
return regID
}
func (cc *ClientConductor) FindSubscription(regID int64) *Subscription {
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
var subscription *Subscription
for _, sub := range cc.subs {
if sub.regID == regID {
switch sub.status {
case RegistrationStatus.AwaitingMediaDriver:
waitForMediaDriver(sub.timeOfRegistration, cc)
case RegistrationStatus.ErroredMediaDriver:
errStr := fmt.Sprintf("Error on %d: %d: %s", regID, sub.errorCode, sub.errorMessage)
cc.errorHandler(errors.New(errStr))
log.Fatal(errStr)
}
subscription = sub.subscription
break
}
}
return subscription
}
func waitForMediaDriver(timeOfRegistration int64, cc *ClientConductor) {
if now := time.Now().UnixNano(); now > (timeOfRegistration + cc.driverTimeoutNs) {
errStr := fmt.Sprintf("No response from driver. started: %d, now: %d, to: %d",
timeOfRegistration/time.Millisecond.Nanoseconds(),
now/time.Millisecond.Nanoseconds(),
cc.driverTimeoutNs/time.Millisecond.Nanoseconds())
if cc.errorHandler != nil {
cc.errorHandler(errors.New(errStr))
}
log.Fatal(errStr)
}
}
func (cc *ClientConductor) releaseSubscription(regID int64, images []Image) {
logger.Debugf("ReleaseSubscription: regID=%d", regID)
cc.verifyDriverIsActive()
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
now := time.Now().UnixNano()
subcnt := len(cc.subs)
for i, sub := range cc.subs {
if sub != nil && sub.regID == regID {
if logger.IsEnabledFor(logging.DEBUG) {
logger.Debugf("Removing subscription: %d; %v", regID, images)
}
cc.driverProxy.RemoveSubscription(regID)
cc.subs[i] = cc.subs[subcnt-1]
cc.subs[subcnt-1] = nil
subcnt--
for i := range images {
image := &images[i]
if cc.onUnavailableImageHandler != nil {
cc.onUnavailableImageHandler(image)
}
cc.lingeringResources <- lingerResource{now, image}
}
}
}
cc.subs = cc.subs[:subcnt]
}
func (cc *ClientConductor) OnNewPublication(streamID int32, sessionID int32, posLimitCounterID int32,
channelStatusIndicatorID int32, logFileName string, regID int64, origRegID int64) {
logger.Debugf("OnNewPublication: streamId=%d, sessionId=%d, posLimitCounterID=%d, channelStatusIndicatorID=%d, logFileName=%s, correlationID=%d, regID=%d",
streamID, sessionID, posLimitCounterID, channelStatusIndicatorID, logFileName, regID, origRegID)
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
for _, pubDef := range cc.pubs {
if pubDef.regID == regID {
pubDef.status = RegistrationStatus.RegisteredMediaDriver
pubDef.sessionID = sessionID
pubDef.posLimitCounterID = posLimitCounterID
pubDef.channelStatusIndicatorID = channelStatusIndicatorID
pubDef.buffers = logbuffer.Wrap(logFileName)
pubDef.origRegID = origRegID
logger.Debugf("Updated publication: %v", pubDef)
if cc.onNewPublicationHandler != nil {
cc.onNewPublicationHandler(pubDef.channel, streamID, sessionID, regID)
}
}
}
}
// TODO Implement logic specific to exclusive publications
func (cc *ClientConductor) OnNewExclusivePublication(streamID int32, sessionID int32, posLimitCounterID int32,
channelStatusIndicatorID int32, logFileName string, regID int64, origRegID int64) {
logger.Debugf("OnNewExclusivePublication: streamId=%d, sessionId=%d, posLimitCounterID=%d, channelStatusIndicatorID=%d, logFileName=%s, correlationID=%d, regID=%d",
streamID, sessionID, posLimitCounterID, channelStatusIndicatorID, logFileName, regID, origRegID)
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
for _, pubDef := range cc.pubs {
if pubDef.regID == regID {
pubDef.status = RegistrationStatus.RegisteredMediaDriver
pubDef.sessionID = sessionID
pubDef.posLimitCounterID = posLimitCounterID
pubDef.channelStatusIndicatorID = channelStatusIndicatorID
pubDef.buffers = logbuffer.Wrap(logFileName)
pubDef.origRegID = origRegID
logger.Debugf("Updated publication: %v", pubDef)
if cc.onNewPublicationHandler != nil {
cc.onNewPublicationHandler(pubDef.channel, streamID, sessionID, regID)
}
}
}
}
func (cc *ClientConductor) OnAvailableCounter(correlationID int64, counterID int32) {
logger.Debugf("OnAvailableCounter: correlationID=%d, counterID=%d",
correlationID, counterID)
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
logger.Warning("OnAvailableCounter: Not supported yet")
}
func (cc *ClientConductor) OnUnavailableCounter(correlationID int64, counterID int32) {
logger.Debugf("OnUnavailableCounter: correlationID=%d, counterID=%d",
correlationID, counterID)
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
logger.Warning("OnUnavailableCounter: Not supported yet")
}
func (cc *ClientConductor) OnClientTimeout(clientID int64) {
logger.Debugf("OnClientTimeout: clientID=%d", clientID)
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
if clientID == cc.driverProxy.ClientID() {
errStr := fmt.Sprintf("OnClientTimeout for ClientID:%d", clientID)
logger.Error(errStr)
if cc.errorHandler != nil {
cc.errorHandler(errors.New(errStr))
|
func (cc *ClientConductor) OnSubscriptionReady(correlationID int64, channelStatusIndicatorID int32) {
logger.Debugf("OnSubscriptionReady: correlationID=%d, channelStatusIndicatorID=%d",
correlationID, channelStatusIndicatorID)
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
for _, sub := range cc.subs {
if sub.regID == correlationID {
sub.status = RegistrationStatus.RegisteredMediaDriver
sub.subscription = NewSubscription(cc, sub.channel, correlationID, sub.streamID)
if cc.onNewSubscriptionHandler != nil {
cc.onNewSubscriptionHandler(sub.channel, sub.streamID, correlationID)
}
}
}
}
func (cc *ClientConductor) OnAvailableImage(streamID int32, sessionID int32, logFilename string, sourceIdentity string,
subscriberPositionID int32, subsRegID int64, corrID int64) {
logger.Debugf("OnAvailableImage: streamId=%d, sessionId=%d, logFilename=%s, sourceIdentity=%s, subsRegID=%d, corrID=%d",
streamID, sessionID, logFilename, sourceIdentity, subsRegID, corrID)
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
for _, sub := range cc.subs {
if sub.streamID == streamID && sub.subscription != nil {
if !sub.subscription.hasImage(sessionID) && sub.regID == subsRegID {
image := NewImage(sessionID, corrID, logbuffer.Wrap(logFilename))
image.subscriptionRegistrationID = sub.regID
image.sourceIdentity = sourceIdentity
image.subscriberPosition = NewPosition(cc.counterValuesBuffer, subscriberPositionID)
image.exceptionHandler = cc.errorHandler
logger.Debugf("OnAvailableImage: new image position: %v -> %d",
image.subscriberPosition, image.subscriberPosition.get())
sub.subscription.addImage(image)
if nil != cc.onAvailableImageHandler {
cc.onAvailableImageHandler(image)
}
}
}
}
}
func (cc *ClientConductor) OnUnavailableImage(corrID int64, subscriptionRegistrationID int64) {
logger.Debugf("OnUnavailableImage: corrID=%d subscriptionRegistrationID=%d", corrID, subscriptionRegistrationID)
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
for _, sub := range cc.subs {
if sub.regID == subscriptionRegistrationID {
if sub.subscription != nil {
image := sub.subscription.removeImage(corrID)
if cc.onUnavailableImageHandler != nil {
cc.onUnavailableImageHandler(image)
}
cc.lingeringResources <- lingerResource{time.Now().UnixNano(), image}
runtime.KeepAlive(image)
}
}
}
}
func (cc *ClientConductor) OnOperationSuccess(corrID int64) {
logger.Debugf("OnOperationSuccess: correlationId=%d", corrID)
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
}
func (cc *ClientConductor) OnErrorResponse(corrID int64, errorCode int32, errorMessage string) {
logger.Debugf("OnErrorResponse: correlationID=%d, errorCode=%d, errorMessage=%s", corrID, errorCode, errorMessage)
cc.adminLock.Lock()
defer cc.adminLock.Unlock()
for _, pubDef := range cc.pubs {
if pubDef.regID == corrID {
pubDef.status = RegistrationStatus.ErroredMediaDriver
pubDef.errorCode = errorCode
pubDef.errorMessage = errorMessage
return
}
}
for _, subDef := range cc.subs {
if subDef.regID == corrID {
subDef.status = RegistrationStatus.ErroredMediaDriver
subDef.errorCode = errorCode
subDef.errorMessage = errorMessage
}
}
}
func (cc *ClientConductor) onInterServiceTimeout(now int64) {
log.Printf("onInterServiceTimeout: now=%d", now)
err := cc.Close()
if err != nil {
logger.Warningf("Failed to close client conductor: %v", err)
cc.errorHandler(err)
}
}
func (cc *ClientConductor) onHeartbeatCheckTimeouts() int {
var result int
now := time.Now().UnixNano()
if now > (cc.timeOfLastDoWork + cc.interServiceTimeoutNs) {
cc.onInterServiceTimeout(now)
log.Fatalf("Timeout between service calls over %d ms (%d > %d + %d) (%d)",
cc.interServiceTimeoutNs/time.Millisecond.Nanoseconds(),
now/time.Millisecond.Nanoseconds(),
cc.timeOfLastDoWork/time.Millisecond.Nanoseconds(),
cc.interServiceTimeoutNs/time.Millisecond.Nanoseconds(),
(now-cc.timeOfLastDoWork)/time.Millisecond.Nanoseconds())
}
cc.timeOfLastDoWork = now
if now > (cc.timeOfLastKeepalive + keepaliveTimeoutNS) {
cc.driverProxy.SendClientKeepalive()
hbTime := cc.driverProxy.TimeOfLastDriverKeepalive() * time.Millisecond.Nanoseconds()
if now > (hbTime + cc.driverTimeoutNs) {
cc.driverActive.Set(false)
log.Fatalf("Driver has been inactive for over %d ms",
cc.driverTimeoutNs/time.Millisecond.Nanoseconds())
}
cc.timeOfLastKeepalive = now
result = 1
}
if now > (cc.timeOfLastCheckManagedResources + resourceTimeoutNS) {
cc.onCheckManagedResources(now)
cc.timeOfLastCheckManagedResources = now
result = 1
}
return result
}
func (cc *ClientConductor) onCheckManagedResources(now int64) {
moreToCheck := true
for moreToCheck {
select {
case r := <-cc.lingeringResources:
logger.Debugf("Resource to linger: %v", r)
if cc.resourceLingerTimeoutNs < now-r.lastTime {
res := r.resource
logger.Debugf("lingering resource expired(%dms old): %v",
(now-r.lastTime)/time.Millisecond.Nanoseconds(), res)
if res != nil {
err := res.Close()
if err != nil {
logger.Warningf("Failed to close lingering resource: %v", err)
cc.errorHandler(err)
}
}
} else {
// The assumption is that resources are queued in order
moreToCheck = false
// FIXME ..and we're breaking it here, but since there is no peek...
cc.lingeringResources <- r
}
default:
moreToCheck = false
}
}
}
func (cc *ClientConductor) isPublicationConnected(timeOfLastStatusMessage int64) bool {
return time.Now().UnixNano() <= (timeOfLastStatusMessage*int64(time.Millisecond) + cc.publicationConnectionTimeoutNs)
}
// CounterReader returns the counter reader.
func (cc *ClientConductor) CounterReader() *ctr.Reader {
return cc.counterReader
}
|
}
cc.running.Set(false)
}
}
|
lock.go
|
package command
import (
"fmt"
"os"
"path"
"strings"
"sync"
"syscall"
"time"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/api"
)
const (
// lockKillGracePeriod is how long we allow a child between
// a SIGTERM and a SIGKILL. This is to let the child cleanup
// any necessary state. We have to balance this with the risk
// of a split-brain where multiple children may be acting as if
// they hold a lock. This value is currently based on the default
// lock-delay value of 15 seconds. This only affects locks and not
// semaphores.
lockKillGracePeriod = 5 * time.Second
// defaultMonitorRetry is the number of 500 errors we will tolerate
// before declaring the lock gone.
defaultMonitorRetry = 3
// defaultMonitorRetryTime is the amount of time to wait between
// retries.
defaultMonitorRetryTime = 1 * time.Second
)
// LockCommand is a Command implementation that is used to setup
// a "lock" which manages lock acquisition and invokes a sub-process
type LockCommand struct {
BaseCommand
ShutdownCh <-chan struct{}
child *os.Process
childLock sync.Mutex
verbose bool
}
func (c *LockCommand) Help() string {
helpText := `
Usage: consul lock [options] prefix child...
Acquires a lock or semaphore at a given path, and invokes a child process
when successful. The child process can assume the lock is held while it
executes. If the lock is lost or communication is disrupted the child
process will be sent a SIGTERM signal and given time to gracefully exit.
After the grace period expires the process will be hard terminated.
For Consul agents on Windows, the child process is always hard terminated
with a SIGKILL, since Windows has no POSIX compatible notion for SIGTERM.
When -n=1, only a single lock holder or leader exists providing mutual
exclusion. Setting a higher value switches to a semaphore allowing multiple
holders to coordinate.
The prefix provided must have write privileges.
` + c.BaseCommand.Help()
return strings.TrimSpace(helpText)
}
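// Illustrative invocation (service name and script path are made up):
//   consul lock -n=3 -timeout=30s service/my-app/lock ./worker.sh
// This acquires a 3-slot semaphore under the given prefix and runs
// ./worker.sh while a slot is held.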
func (c *LockCommand) Run(args []string) int {
var lu *LockUnlock
return c.run(args, &lu)
}
func (c *LockCommand) run(args []string, lu **LockUnlock) int {
var childDone chan struct{}
var limit int
var monitorRetry int
var name string
var passStdin bool
var timeout time.Duration
f := c.BaseCommand.NewFlagSet(c)
f.IntVar(&limit, "n", 1,
"Optional limit on the number of concurrent lock holders. The underlying "+
"implementation switches from a lock to a semaphore when the value is "+
"greater than 1. The default value is 1.")
f.IntVar(&monitorRetry, "monitor-retry", defaultMonitorRetry,
"Number of times to retry if Consul returns a 500 error while monitoring "+
"the lock. This allows riding out brief periods of unavailability "+
"without causing leader elections, but increases the amount of time "+
"required to detect a lost lock in some cases. The default value is 3, "+
"with a 1s wait between retries. Set this value to 0 to disable retires.")
f.StringVar(&name, "name", "",
"Optional name to associate with the lock session. It not provided, one "+
"is generated based on the provided child command.")
f.BoolVar(&passStdin, "pass-stdin", false,
"Pass stdin to the child process.")
f.DurationVar(&timeout, "timeout", 0,
"Maximum amount of time to wait to acquire the lock, specified as a "+
"timestamp like \"1s\" or \"3h\". The default value is 0.")
f.BoolVar(&c.verbose, "verbose", false,
"Enable verbose (debugging) output.")
// Deprecations
f.DurationVar(&timeout, "try", 0,
"DEPRECATED. Use -timeout instead.")
if err := c.BaseCommand.Parse(args); err != nil {
return 1
}
// Check the limit
if limit <= 0 {
c.UI.Error(fmt.Sprintf("Lock holder limit must be positive"))
return 1
}
// Verify the prefix and child are provided
extra := f.Args()
if len(extra) < 2 {
c.UI.Error("Key prefix and child command must be specified")
return 1
}
prefix := extra[0]
prefix = strings.TrimPrefix(prefix, "/")
script := strings.Join(extra[1:], " ")
if timeout < 0 {
c.UI.Error("Timeout must be positive")
return 1
}
// Calculate a session name if none provided
if name == "" {
|
// Calculate oneshot
oneshot := timeout > 0
// Check the retry parameter
if monitorRetry < 0 {
c.UI.Error("Number for 'monitor-retry' must be >= 0")
return 1
}
// Create and test the HTTP client
client, err := c.BaseCommand.HTTPClient()
if err != nil {
c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err))
return 1
}
_, err = client.Agent().NodeName()
if err != nil {
c.UI.Error(fmt.Sprintf("Error querying Consul agent: %s", err))
return 1
}
// Setup the lock or semaphore
if limit == 1 {
*lu, err = c.setupLock(client, prefix, name, oneshot, timeout, monitorRetry)
} else {
*lu, err = c.setupSemaphore(client, limit, prefix, name, oneshot, timeout, monitorRetry)
}
if err != nil {
c.UI.Error(fmt.Sprintf("Lock setup failed: %s", err))
return 1
}
// Attempt the acquisition
if c.verbose {
c.UI.Info("Attempting lock acquisition")
}
lockCh, err := (*lu).lockFn(c.ShutdownCh)
if lockCh == nil {
if err == nil {
c.UI.Error("Shutdown triggered or timeout during lock acquisition")
} else {
c.UI.Error(fmt.Sprintf("Lock acquisition failed: %s", err))
}
return 1
}
// Check if we were shutdown but managed to still acquire the lock
select {
case <-c.ShutdownCh:
c.UI.Error("Shutdown triggered during lock acquisition")
goto RELEASE
default:
}
// Start the child process
childDone = make(chan struct{})
go func() {
if err := c.startChild(script, childDone, passStdin); err != nil {
c.UI.Error(fmt.Sprintf("%s", err))
}
}()
// Monitor for shutdown, child termination, or lock loss
select {
case <-c.ShutdownCh:
if c.verbose {
c.UI.Info("Shutdown triggered, killing child")
}
case <-lockCh:
if c.verbose {
c.UI.Info("Lock lost, killing child")
}
case <-childDone:
if c.verbose {
c.UI.Info("Child terminated, releasing lock")
}
goto RELEASE
}
// Prevent starting a new child. The lock is never released
// after this point.
c.childLock.Lock()
// Kill any existing child
if err := c.killChild(childDone); err != nil {
c.UI.Error(fmt.Sprintf("%s", err))
}
RELEASE:
// Release the lock before termination
if err := (*lu).unlockFn(); err != nil {
c.UI.Error(fmt.Sprintf("Lock release failed: %s", err))
return 1
}
// Cleanup the lock if no longer in use
if err := (*lu).cleanupFn(); err != nil {
if err != (*lu).inUseErr {
c.UI.Error(fmt.Sprintf("Lock cleanup failed: %s", err))
return 1
} else if c.verbose {
c.UI.Info("Cleanup aborted, lock in use")
}
} else if c.verbose {
c.UI.Info("Cleanup succeeded")
}
return 0
}
// setupLock is used to setup a new Lock given the API client, the key prefix to
// operate on, and an optional session name. If oneshot is true then we will set
// up for a single attempt at acquisition, using the given wait time. The retry
// parameter sets how many 500 errors the lock monitor will tolerate before
// giving up the lock.
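// A sketch of a call (prefix and wait time are illustrative):
//   lu, err := c.setupLock(client, "service/my-app/lock", "", true, 10*time.Second, 3)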
func (c *LockCommand) setupLock(client *api.Client, prefix, name string,
oneshot bool, wait time.Duration, retry int) (*LockUnlock, error) {
// Use the DefaultSemaphoreKey extension, this way if a lock and
// semaphore are both used at the same prefix, we will get a conflict
// which we can report to the user.
key := path.Join(prefix, api.DefaultSemaphoreKey)
if c.verbose {
c.UI.Info(fmt.Sprintf("Setting up lock at path: %s", key))
}
opts := api.LockOptions{
Key: key,
SessionName: name,
MonitorRetries: retry,
MonitorRetryTime: defaultMonitorRetryTime,
}
if oneshot {
opts.LockTryOnce = true
opts.LockWaitTime = wait
}
l, err := client.LockOpts(&opts)
if err != nil {
return nil, err
}
lu := &LockUnlock{
lockFn: l.Lock,
unlockFn: l.Unlock,
cleanupFn: l.Destroy,
inUseErr: api.ErrLockInUse,
rawOpts: &opts,
}
return lu, nil
}
// setupSemaphore is used to setup a new Semaphore given the API client, key
// prefix, session name, and slot holder limit. If oneshot is true then we will
// set up for a single attempt at acquisition, using the given wait time. The
// retry parameter sets how many 500 errors the lock monitor will tolerate
// before giving up the semaphore.
func (c *LockCommand) setupSemaphore(client *api.Client, limit int, prefix, name string,
oneshot bool, wait time.Duration, retry int) (*LockUnlock, error) {
if c.verbose {
c.UI.Info(fmt.Sprintf("Setting up semaphore (limit %d) at prefix: %s", limit, prefix))
}
opts := api.SemaphoreOptions{
Prefix: prefix,
Limit: limit,
SessionName: name,
MonitorRetries: retry,
MonitorRetryTime: defaultMonitorRetryTime,
}
if oneshot {
opts.SemaphoreTryOnce = true
opts.SemaphoreWaitTime = wait
}
s, err := client.SemaphoreOpts(&opts)
if err != nil {
return nil, err
}
lu := &LockUnlock{
lockFn: s.Acquire,
unlockFn: s.Release,
cleanupFn: s.Destroy,
inUseErr: api.ErrSemaphoreInUse,
rawOpts: &opts,
}
return lu, nil
}
// startChild is a long running routine used to start and
// wait for the child process to exit.
func (c *LockCommand) startChild(script string, doneCh chan struct{}, passStdin bool) error {
defer close(doneCh)
if c.verbose {
c.UI.Info(fmt.Sprintf("Starting handler '%s'", script))
}
// Create the command
cmd, err := agent.ExecScript(script)
if err != nil {
c.UI.Error(fmt.Sprintf("Error executing handler: %s", err))
return err
}
// Setup the command streams
cmd.Env = append(os.Environ(),
"CONSUL_LOCK_HELD=true",
)
if passStdin {
if c.verbose {
c.UI.Info("Stdin passed to handler process")
}
cmd.Stdin = os.Stdin
} else {
cmd.Stdin = nil
}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
// Start the child process
c.childLock.Lock()
if err := cmd.Start(); err != nil {
c.UI.Error(fmt.Sprintf("Error starting handler: %s", err))
c.childLock.Unlock()
return err
}
// Setup the child info
c.child = cmd.Process
c.childLock.Unlock()
// Wait for the child process
if err := cmd.Wait(); err != nil {
c.UI.Error(fmt.Sprintf("Error running handler: %s", err))
return err
}
return nil
}
// killChild is used to forcefully kill the child, first using SIGTERM
// to allow for a graceful cleanup and then using SIGKILL for a hard
// termination.
// On Windows, the child is always hard terminated with a SIGKILL, even
// on the first attempt.
func (c *LockCommand) killChild(childDone chan struct{}) error {
// Get the child process
child := c.child
// If there is no child process (failed to start), we can quit early
if child == nil {
if c.verbose {
c.UI.Info("No child process to kill")
}
return nil
}
// Attempt termination first
if c.verbose {
c.UI.Info(fmt.Sprintf("Terminating child pid %d", child.Pid))
}
if err := signalPid(child.Pid, syscall.SIGTERM); err != nil {
return fmt.Errorf("Failed to terminate %d: %v", child.Pid, err)
}
// Wait for termination, or until a timeout
select {
case <-childDone:
if c.verbose {
c.UI.Info("Child terminated")
}
return nil
case <-time.After(lockKillGracePeriod):
if c.verbose {
c.UI.Info(fmt.Sprintf("Child did not exit after grace period of %v",
lockKillGracePeriod))
}
}
// Send a final SIGKILL
if c.verbose {
c.UI.Info(fmt.Sprintf("Killing child pid %d", child.Pid))
}
if err := signalPid(child.Pid, syscall.SIGKILL); err != nil {
return fmt.Errorf("Failed to kill %d: %v", child.Pid, err)
}
return nil
}
func (c *LockCommand) Synopsis() string {
return "Execute a command holding a lock"
}
// LockUnlock is used to abstract over the differences between
// a lock and a semaphore.
type LockUnlock struct {
lockFn func(<-chan struct{}) (<-chan struct{}, error)
unlockFn func() error
cleanupFn func() error
inUseErr error
rawOpts interface{}
}
|
name = fmt.Sprintf("Consul lock for '%s' at '%s'", script, prefix)
}
|
callback_query.go
|
package gorm
import (
"errors"
"fmt"
"reflect"
)
// Define callbacks for querying
func init() {
DefaultCallback.Query().Register("gorm:query", queryCallback)
DefaultCallback.Query().Register("gorm:preload", preloadCallback)
DefaultCallback.Query().Register("gorm:after_query", afterQueryCallback)
}
// queryCallback used to query data from database
func queryCallback(scope *Scope)
|
// afterQueryCallback will invoke `AfterFind` method after querying
func afterQueryCallback(scope *Scope) {
if !scope.HasError() {
scope.CallMethod("AfterFind")
}
}
|
{
if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip {
return
}
// we are only preloading relations, don't touch base model
if _, skip := scope.InstanceGet("gorm:only_preload"); skip {
return
}
defer scope.trace(NowFunc())
var (
isSlice, isPtr bool
resultType reflect.Type
results = scope.IndirectValue()
)
if orderBy, ok := scope.Get("gorm:order_by_primary_key"); ok {
if primaryField := scope.PrimaryField(); primaryField != nil {
scope.Search.Order(fmt.Sprintf("%v.%v %v", scope.QuotedTableName(), scope.Quote(primaryField.DBName), orderBy))
}
}
if value, ok := scope.Get("gorm:query_destination"); ok {
results = indirect(reflect.ValueOf(value))
}
if kind := results.Kind(); kind == reflect.Slice {
isSlice = true
resultType = results.Type().Elem()
results.Set(reflect.MakeSlice(results.Type(), 0, 0))
if resultType.Kind() == reflect.Ptr {
isPtr = true
resultType = resultType.Elem()
}
} else if kind != reflect.Struct {
scope.Err(errors.New("unsupported destination, should be slice or struct"))
return
}
scope.prepareQuerySQL()
if !scope.HasError() {
scope.db.RowsAffected = 0
if str, ok := scope.Get("gorm:query_hint"); ok {
scope.SQL = fmt.Sprint(str) + scope.SQL
}
if rows, err := scope.SQLDB().Query(scope.SQL, scope.SQLVars...); scope.Err(err) == nil {
defer rows.Close()
columns, _ := rows.Columns()
for rows.Next() {
scope.db.RowsAffected++
elem := results
if isSlice {
elem = reflect.New(resultType).Elem()
}
scope.scan(rows, columns, scope.New(elem.Addr().Interface()).Fields())
if isSlice {
if isPtr {
results.Set(reflect.Append(results, elem.Addr()))
} else {
results.Set(reflect.Append(results, elem))
}
}
}
if err := rows.Err(); err != nil {
scope.Err(err)
} else if scope.db.RowsAffected == 0 && !isSlice {
scope.Err(ErrRecordNotFound)
}
}
}
}
|
lib.rs
|
pub mod builtin_functions;
pub mod default_env;
pub mod eval;
pub mod parser;
pub mod runner;
pub mod types;
#[cfg(test)]
mod tests {
use crate::{default_env::default_env, eval::parse_and_eval};
#[test]
fn
|
() {
/*
* Immediately invoked lambda expression
*/
let mut env = default_env();
let expr = "((lambda (x) (+ x 10)) 10)".to_string();
let eval_expr = parse_and_eval(expr, &mut env).unwrap();
assert_eq!(format!("{}", eval_expr), "20");
}
#[test]
fn list_test() {
let mut env = default_env();
let list_fn = "(list 1 2 3)".to_string();
let ans = parse_and_eval(list_fn, &mut env).unwrap();
assert_eq!(format!("{}", ans), "(1 2 3)");
let mapper = "(map (lambda (x) (+ x x)) '(1 2 3 4))".to_string();
let ans = parse_and_eval(mapper, &mut env).unwrap();
assert_eq!(format!("{}", ans), "(2 4 6 8)");
}
#[test]
fn square() {
let square_fn = "(define square (lambda (x) (* x x)))";
let mut env = default_env();
parse_and_eval(square_fn.to_string(), &mut env).unwrap();
let sq_10 = parse_and_eval("(square 10)".to_string(), &mut env).unwrap();
assert_eq!(format!("{}", sq_10), "100");
let sq = parse_and_eval("(square 5.5)".to_string(), &mut env).unwrap();
assert_eq!(format!("{}", sq), "30.25");
}
#[test]
fn fact() {
let mut env = default_env();
let fact_fn = "(define fact (lambda (n) (if (<= n 1) 1 (* n (fact (- n 1))))))".to_string();
parse_and_eval(fact_fn, &mut env).unwrap();
let fact_10 = parse_and_eval("(fact 10)".to_string(), &mut env).unwrap();
assert_eq!(format!("{}", fact_10), "3628800");
let fact_0 = parse_and_eval("(fact 0)".to_string(), &mut env).unwrap();
assert_eq!(format!("{}", fact_0), "1");
let fact_20 = parse_and_eval("(fact 20)".to_string(), &mut env).unwrap();
assert_eq!(format!("{}", fact_20), "2432902008176640000");
}
#[test]
fn fib() {
let mut env = default_env();
let fib_fn = "(define (fib x) (
if (<= x 1) x
(+ (fib (- x 1)) (fib (- x 2)))
))"
.to_string();
parse_and_eval(fib_fn, &mut env).unwrap();
let fib_2 = parse_and_eval("(fib 2)".to_string(), &mut env).unwrap();
assert_eq!(format!("{}", fib_2), "1");
let fib_10 = parse_and_eval("(fib 10)".to_string(), &mut env).unwrap();
assert_eq!(format!("{}", fib_10), "55");
}
#[test]
fn test() {}
}
|
iile
|
utils.go
|
// Package utils implements utilities used across different
// areas of the sish application. There are utility functions
// that help with overall state management and are core to the application.
package utils
import (
"bytes"
"crypto/ed25519"
"crypto/rand"
"encoding/pem"
"fmt"
"io"
"io/fs"
"io/ioutil"
"log"
mathrand "math/rand"
"net"
"net/url"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/ScaleFT/sshkeys"
"github.com/caddyserver/certmagic"
"github.com/jpillora/ipfilter"
"github.com/logrusorgru/aurora"
"github.com/mikesmitty/edkey"
"github.com/pires/go-proxyproto"
"github.com/radovskyb/watcher"
"github.com/spf13/viper"
"github.com/vulcand/oxy/roundrobin"
"golang.org/x/crypto/ssh"
)
const (
// sishDNSPrefix is the prefix used for DNS TXT records.
sishDNSPrefix = "sish="
)
var (
// SshFilter is the IPFilter used to block ssh connections.
SshFilter *ipfilter.IPFilter
// ClientFilter is the IPFilter used to block client connections.
ClientFilter *ipfilter.IPFilter
// certHolder is a slice of publickeys for auth.
certHolder = make([]ssh.PublicKey, 0)
// holderLock is the mutex used to update the certHolder slice.
holderLock = sync.Mutex{}
// bannedSubdomainList is a list of subdomains that cannot be bound.
bannedSubdomainList = []string{""}
// bannedAliasList is a list of aliases that cannot be bound.
bannedAliasList = []string{""}
// multiWriter is the writer that can be used for writing to multiple locations.
multiWriter io.Writer
)
// Setup initializes the main utils: whitelists, blacklists,
// and log writers.
func Setup(logWriter io.Writer) {
multiWriter = logWriter
upperList := func(stringList string) []string {
list := strings.FieldsFunc(stringList, CommaSplitFields)
for k, v := range list {
list[k] = strings.ToUpper(v)
}
return list
}
whitelistedCountriesList := upperList(viper.GetString("whitelisted-countries"))
whitelistedIPList := strings.FieldsFunc(viper.GetString("whitelisted-ips"), CommaSplitFields)
whitelistedSSHIPList := strings.FieldsFunc(viper.GetString("whitelisted-ssh-ips"), CommaSplitFields)
if len(whitelistedSSHIPList) == 0 {
whitelistedSSHIPList = whitelistedIPList
}
sshIpfilterOpts := ipfilter.Options{
BlockedCountries: upperList(viper.GetString("banned-countries")),
AllowedCountries: whitelistedCountriesList,
BlockedIPs: strings.FieldsFunc(viper.GetString("banned-ips"), CommaSplitFields),
AllowedIPs: whitelistedSSHIPList,
BlockByDefault: len(whitelistedSSHIPList) > 0 || len(whitelistedCountriesList) > 0,
}
clientIpfilterOpts := ipfilter.Options{
BlockedCountries: upperList(viper.GetString("banned-countries")),
AllowedCountries: whitelistedCountriesList,
BlockedIPs: strings.FieldsFunc(viper.GetString("banned-ips"), CommaSplitFields),
AllowedIPs: whitelistedIPList,
BlockByDefault: len(whitelistedIPList) > 0 || len(whitelistedCountriesList) > 0,
}
if viper.GetBool("geodb") {
SshFilter = ipfilter.NewLazy(sshIpfilterOpts)
ClientFilter = ipfilter.NewLazy(clientIpfilterOpts)
} else {
SshFilter = ipfilter.NewNoDB(sshIpfilterOpts)
ClientFilter = ipfilter.NewNoDB(clientIpfilterOpts)
}
bannedSubdomainList = append(bannedSubdomainList, strings.FieldsFunc(viper.GetString("banned-subdomains"), CommaSplitFields)...)
for k, v := range bannedSubdomainList {
bannedSubdomainList[k] = strings.ToLower(strings.TrimSpace(v) + "." + viper.GetString("domain"))
}
bannedAliasList = append(bannedAliasList, strings.FieldsFunc(viper.GetString("banned-aliases"), CommaSplitFields)...)
for k, v := range bannedAliasList {
bannedAliasList[k] = strings.ToLower(strings.TrimSpace(v))
}
}
// CommaSplitFields is a function used by strings.FieldsFunc to split around commas.
func CommaSplitFields(c rune) bool {
return c == ','
}
// LoadProxyProtoConfig will load the timeouts and policies for the proxy protocol.
func LoadProxyProtoConfig(l *proxyproto.Listener) {
if viper.GetBool("proxy-protocol-use-timeout") {
l.ReadHeaderTimeout = viper.GetDuration("proxy-protocol-timeout")
l.Policy = func(upstream net.Addr) (proxyproto.Policy, error) {
switch viper.GetString("proxy-protocol-policy") {
case "ignore":
return proxyproto.IGNORE, nil
case "reject":
return proxyproto.REJECT, nil
case "require":
return proxyproto.REQUIRE, nil
}
return proxyproto.USE, nil
}
}
}
// GetRandomPortInRange returns a random port in the provided range.
// The port range is a comma separated list of ranges or ports.
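// For example (illustrative): "2000-2005,2020,2300-2500".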
func GetRandomPortInRange(portRange string) uint32 {
var bindPort uint32
ranges := strings.Split(strings.TrimSpace(portRange), ",")
possible := [][]uint64{}
for _, r := range ranges {
ends := strings.Split(strings.TrimSpace(r), "-")
if len(ends) == 1 {
ui, err := strconv.ParseUint(ends[0], 0, 64)
if err != nil {
return 0
}
possible = append(possible, []uint64{uint64(ui)})
} else if len(ends) == 2 {
ui1, err := strconv.ParseUint(ends[0], 0, 64)
if err != nil {
return 0
}
ui2, err := strconv.ParseUint(ends[1], 0, 64)
if err != nil {
return 0
}
possible = append(possible, []uint64{uint64(ui1), uint64(ui2)})
}
}
mathrand.Seed(time.Now().UnixNano())
locHolder := mathrand.Intn(len(possible))
if len(possible[locHolder]) == 1 {
bindPort = uint32(possible[locHolder][0])
} else if len(possible[locHolder]) == 2 {
bindPort = uint32(mathrand.Intn(int(possible[locHolder][1]-possible[locHolder][0])) + int(possible[locHolder][0]))
}
ln, err := net.Listen("tcp", fmt.Sprintf(":%d", bindPort))
if err != nil {
return GetRandomPortInRange(portRange)
}
ln.Close()
return bindPort
}
// CheckPort verifies that a port exists within the port range.
// It will return 0 and an error if not (0 allows the kernel to select
// the port).
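// For example (illustrative), CheckPort(2222, "2000-3000") yields (2222, nil),
// while CheckPort(80, "2000-3000") yields (0, error).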
func CheckPort(port uint32, portRanges string) (uint32, error) {
ranges := strings.Split(strings.TrimSpace(portRanges), ",")
checks := false
for _, r := range ranges {
ends := strings.Split(strings.TrimSpace(r), "-")
if len(ends) == 1 {
ui, err := strconv.ParseUint(ends[0], 0, 64)
if err != nil {
return 0, err
}
if uint64(ui) == uint64(port) {
checks = true
continue
}
} else if len(ends) == 2 {
ui1, err := strconv.ParseUint(ends[0], 0, 64)
if err != nil {
return 0, err
}
ui2, err := strconv.ParseUint(ends[1], 0, 64)
if err != nil {
return 0, err
}
if uint64(port) >= ui1 && uint64(port) <= ui2 {
checks = true
continue
}
}
}
if checks {
return port, nil
}
return 0, fmt.Errorf("not a safe port")
}
func loadCerts(certManager *certmagic.Config) {
certFiles, err := filepath.Glob(filepath.Join(viper.GetString("https-certificate-directory"), "*.crt"))
if err != nil {
log.Println("Error loading unmanaged certificates:", err)
}
for _, v := range certFiles {
err := certManager.CacheUnmanagedCertificatePEMFile(v, fmt.Sprintf("%s.key", strings.TrimSuffix(v, ".crt")), []string{})
if err != nil {
log.Println("Error loading unmanaged certificate:", err)
}
}
}
func loadPrivateKeys(config *ssh.ServerConfig) {
count := 0
parseKey := func(data []byte, directory fs.DirEntry) {
key, err := ssh.ParsePrivateKey(data)
if _, ok := err.(*ssh.PassphraseMissingError); ok {
key, err = ssh.ParsePrivateKeyWithPassphrase(data, []byte(viper.GetString("private-key-passphrase")))
}
if err != nil {
log.Printf("Error parsing private key file %s: %s\n", directory.Name(), err)
return
}
log.Printf("Loading %s as %s host key", directory.Name(), key.PublicKey().Type())
config.AddHostKey(key)
count++
}
err := filepath.WalkDir(viper.GetString("private-keys-directory"), func(path string, d fs.DirEntry, err error) error {
if d.IsDir() {
return nil
}
if err != nil {
log.Printf("Error walking file %s for private key: %s\n", d.Name(), err)
return nil
}
i, e := ioutil.ReadFile(path)
if e != nil {
log.Printf("Can't read file %s as private key: %s\n", d.Name(), err)
return nil
}
if len(i) > 0 {
parseKey(i, d)
}
return nil
})
if err != nil {
log.Printf("Unable to walk private-keys-directory %s: %s\n", viper.GetString("private-keys-directory"), err)
}
if count == 0 {
config.AddHostKey(loadPrivateKey(viper.GetString("private-key-passphrase")))
}
}
// WatchCerts watches https certs for changes and will load them.
func WatchCerts(certManager *certmagic.Config) {
loadCerts(certManager)
w := watcher.New()
w.SetMaxEvents(1)
if err := w.AddRecursive(viper.GetString("https-certificate-directory")); err != nil {
log.Fatalln(err)
}
go func() {
w.Wait()
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
w.Close()
os.Exit(0)
}
}()
for {
select {
case _, ok := <-w.Event:
if !ok {
return
}
loadCerts(certManager)
case _, ok := <-w.Error:
if !ok {
return
}
}
}
}()
go func() {
if err := w.Start(viper.GetDuration("https-certificate-directory-watch-interval")); err != nil {
log.Fatalln(err)
}
}()
}
// WatchKeys watches ssh keys for changes and will load them.
func WatchKeys() {
loadKeys()
w := watcher.New()
w.SetMaxEvents(1)
if err := w.AddRecursive(viper.GetString("authentication-keys-directory")); err != nil {
log.Fatalln(err)
}
go func() {
w.Wait()
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
w.Close()
os.Exit(0)
}
}()
for {
select {
case _, ok := <-w.Event:
if !ok {
return
}
loadKeys()
case _, ok := <-w.Error:
if !ok {
return
}
}
}
}()
go func() {
if err := w.Start(viper.GetDuration("authentication-keys-directory-watch-interval")); err != nil {
log.Fatalln(err)
}
}()
}
// loadKeys loads public keys from the keys directory into a slice that is used
// for authenticating a user.
func loadKeys() {
tmpCertHolder := make([]ssh.PublicKey, 0)
parseKey := func(keyBytes []byte, d fs.DirEntry) {
keyHandle := func(keyBytes []byte, d fs.DirEntry) []byte {
key, _, _, rest, e := ssh.ParseAuthorizedKey(keyBytes)
if e != nil {
if e.Error() != "ssh: no key found" || (e.Error() == "ssh: no key found" && viper.GetBool("debug")) {
log.Printf("Can't load file %s:\"%s\" as public key: %s\n", d.Name(), string(keyBytes), e)
}
}
if key != nil {
tmpCertHolder = append(tmpCertHolder, key)
}
return rest
}
for ok := true; ok; ok = len(keyBytes) > 0 {
keyBytes = keyHandle(keyBytes, d)
}
}
err := filepath.WalkDir(viper.GetString("authentication-keys-directory"), func(path string, d fs.DirEntry, err error) error {
if d.IsDir() {
return nil
}
if err != nil {
log.Printf("Error walking file %s for public key: %s\n", d.Name(), err)
return nil
}
i, e := ioutil.ReadFile(path)
if e != nil {
log.Printf("Can't read file %s as public key: %s\n", d.Name(), err)
return nil
}
if len(i) > 0 {
parseKey(i, d)
}
return nil
})
if err != nil {
log.Printf("Unable to walk authentication-keys-directory %s: %s\n", viper.GetString("authentication-keys-directory"), err)
return
}
holderLock.Lock()
defer holderLock.Unlock()
certHolder = tmpCertHolder
}
// GetSSHConfig returns an SSH config for the ssh muxer.
// It handles auth and storing user connection information.
func
|
() *ssh.ServerConfig {
sshConfig := &ssh.ServerConfig{
NoClientAuth: !viper.GetBool("authentication"),
PasswordCallback: func(c ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
log.Printf("Login attempt: %s, user %s", c.RemoteAddr(), c.User())
if string(password) == viper.GetString("authentication-password") && viper.GetString("authentication-password") != "" {
return nil, nil
}
return nil, fmt.Errorf("password doesn't match")
},
PublicKeyCallback: func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
authKey := ssh.MarshalAuthorizedKey(key)
authKey = authKey[:len(authKey)-1]
log.Printf("Login attempt: %s, user %s key: %s", c.RemoteAddr(), c.User(), string(authKey))
holderLock.Lock()
defer holderLock.Unlock()
for _, i := range certHolder {
if bytes.Equal(key.Marshal(), i.Marshal()) {
permissionsData := &ssh.Permissions{
Extensions: map[string]string{
"pubKey": string(authKey),
"pubKeyFingerprint": ssh.FingerprintSHA256(key),
},
}
return permissionsData, nil
}
}
return nil, fmt.Errorf("public key doesn't match")
},
}
loadPrivateKeys(sshConfig)
return sshConfig
}
// generatePrivateKey creates a new ed25519 private key to be used by
// the SSH server as the host key.
func generatePrivateKey(passphrase string) []byte {
_, pk, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
log.Fatal(err)
}
log.Println("Generated ED25519 Keypair")
// In an effort to guarantee that keys can still be loaded by OpenSSH
// we adopt branching logic here for passphrase encrypted keys.
// I wrote a module that handled both, but ultimately decided this
// is likely cleaner and less specialized.
var pemData []byte
if passphrase != "" {
pemData, err = sshkeys.Marshal(pk, &sshkeys.MarshalOptions{
Passphrase: []byte(passphrase),
Format: sshkeys.FormatOpenSSHv1,
})
if err != nil {
log.Fatal(err)
}
} else {
pemBlock := &pem.Block{
Type: "OPENSSH PRIVATE KEY",
Bytes: edkey.MarshalED25519PrivateKey(pk),
}
pemData = pem.EncodeToMemory(pemBlock)
}
err = ioutil.WriteFile(filepath.Join(viper.GetString("private-keys-directory"), "ssh_key"), pemData, 0600)
if err != nil {
log.Println("Error writing to file:", err)
}
return pemData
}
// loadPrivateKey loads (or generates) the host private key, parses it into
// an ssh.Signer, and lets it be used by the SSH server.
func loadPrivateKey(passphrase string) ssh.Signer {
var signer ssh.Signer
pk, err := ioutil.ReadFile(filepath.Join(viper.GetString("private-keys-directory"), "ssh_key"))
if err != nil {
log.Println("Error loading private key, generating a new one:", err)
pk = generatePrivateKey(passphrase)
}
if passphrase != "" {
signer, err = ssh.ParsePrivateKeyWithPassphrase(pk, []byte(passphrase))
if err != nil {
log.Fatal(err)
}
} else {
signer, err = ssh.ParsePrivateKey(pk)
if err != nil {
log.Fatal(err)
}
}
return signer
}
// inList is used to scan whether or not something exists
// in a slice of data.
func inList(host string, bannedList []string) bool {
for _, v := range bannedList {
if strings.TrimSpace(v) == host {
return true
}
}
return false
}
// verifyDNS will verify that a specific domain/subdomain combo matches
// the specific TXT entry that exists for the domain. It will check that the
// public key used for auth is at least included in the TXT records for the domain.
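// An illustrative TXT record (placeholder fingerprint):
//   sish=SHA256:<fingerprint-of-authorized-public-key>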
func verifyDNS(addr string, sshConn *SSHConnection) (bool, string, error) {
if !viper.GetBool("verify-dns") || sshConn.SSHConn.Permissions == nil {
return false, "", nil
}
if _, ok := sshConn.SSHConn.Permissions.Extensions["pubKeyFingerprint"]; !ok {
return false, "", nil
}
records, err := net.LookupTXT(addr)
for _, v := range records {
if strings.HasPrefix(v, sishDNSPrefix) {
dnsPubKeyFingerprint := strings.TrimSpace(strings.TrimPrefix(v, sishDNSPrefix))
match := sshConn.SSHConn.Permissions.Extensions["pubKeyFingerprint"] == dnsPubKeyFingerprint
if match {
return match, dnsPubKeyFingerprint, err
}
}
}
return false, "", nil
}
// GetOpenPort returns an open port that can be bound. It verifies the host to
// bind the port to and attempts to listen to the port to ensure it is open.
// If load balancing is enabled and the port is already in use, it also
// returns the existing TCP holder.
func GetOpenPort(addr string, port uint32, state *State, sshConn *SSHConnection, sniProxyEnabled bool) (string, uint32, *TCPHolder) {
getUnusedPort := func() (string, uint32, *TCPHolder) {
var tH *TCPHolder
var bindErr error
first := true
bindPort := port
bindAddr := addr
listenAddr := ""
if (bindAddr == "localhost" && viper.GetBool("localhost-as-all")) || viper.GetBool("force-tcp-address") || sniProxyEnabled {
bindAddr = viper.GetString("tcp-address")
}
reportUnavailable := func(unavailable bool) {
if first && unavailable {
extra := " Assigning a random port."
if viper.GetBool("force-requested-ports") {
extra = ""
bindErr = fmt.Errorf("unable to bind requested port")
}
sshConn.SendMessage(aurora.Sprintf("The TCP port %s is unavailable.%s", aurora.Red(listenAddr), extra), true)
}
}
checkPort := func(checkerAddr string, checkerPort uint32) bool {
if bindErr != nil {
return false
}
listenAddr = fmt.Sprintf("%s:%d", bindAddr, bindPort)
checkedPort, err := CheckPort(checkerPort, viper.GetString("port-bind-range"))
_, ok := state.TCPListeners.Load(listenAddr)
if err == nil && (!viper.GetBool("tcp-load-balancer") || (viper.GetBool("tcp-load-balancer") && !ok) || (sniProxyEnabled && !ok)) {
ln, listenErr := net.Listen("tcp", listenAddr)
if listenErr != nil {
err = listenErr
} else {
ln.Close()
}
}
if viper.GetBool("bind-random-ports") || !first || err != nil {
reportUnavailable(true)
if viper.GetString("port-bind-range") != "" {
bindPort = GetRandomPortInRange(viper.GetString("port-bind-range"))
} else {
bindPort = 0
}
} else {
bindPort = checkedPort
}
listenAddr = fmt.Sprintf("%s:%d", bindAddr, bindPort)
holder, ok := state.TCPListeners.Load(listenAddr)
if ok && (!sniProxyEnabled && viper.GetBool("tcp-load-balancer") || (sniProxyEnabled && viper.GetBool("sni-load-balancer"))) {
tH = holder
ok = false
}
reportUnavailable(ok)
first = false
return ok
}
for checkPort(bindAddr, bindPort) {
}
return listenAddr, bindPort, tH
}
return getUnusedPort()
}
// GetOpenSNIHost returns an open SNI host or a random host if that one is unavailable.
func GetOpenSNIHost(addr string, state *State, sshConn *SSHConnection, tH *TCPHolder) (string, error) {
getUnusedHost := func() (string, error) {
first := true
hostExtension := ""
if viper.GetBool("append-user-to-subdomain") {
hostExtension = viper.GetString("append-user-to-subdomain-separator") + sshConn.SSHConn.User()
}
var bindErr error
dnsMatch, _, err := verifyDNS(addr, sshConn)
if err != nil && viper.GetBool("debug") {
log.Println("Error looking up txt records for domain:", addr)
}
proposedHost := fmt.Sprintf("%s%s.%s", addr, hostExtension, viper.GetString("domain"))
domainParts := strings.Join(strings.Split(addr, ".")[1:], ".")
if dnsMatch || (viper.GetBool("bind-any-host") && strings.Contains(addr, ".")) || inList(domainParts, strings.FieldsFunc(viper.GetString("bind-hosts"), CommaSplitFields)) {
proposedHost = addr
if proposedHost == fmt.Sprintf(".%s", viper.GetString("domain")) {
proposedHost = viper.GetString("domain")
}
}
if viper.GetBool("bind-root-domain") && proposedHost == fmt.Sprintf(".%s", viper.GetString("domain")) {
proposedHost = viper.GetString("domain")
}
host := strings.ToLower(proposedHost)
getRandomHost := func() string {
return strings.ToLower(RandStringBytesMaskImprSrc(viper.GetInt("bind-random-subdomains-length")) + "." + viper.GetString("domain"))
}
reportUnavailable := func(unavailable bool) {
if first && unavailable {
extra := " Assigning a random subdomain."
if viper.GetBool("force-requested-subdomains") {
extra = ""
bindErr = fmt.Errorf("unable to bind requested subdomain")
}
sshConn.SendMessage(aurora.Sprintf("The subdomain %s is unavailable.%s", aurora.Red(host), extra), true)
}
}
checkHost := func(checkHost string) bool {
if bindErr != nil {
return false
}
if viper.GetBool("bind-random-subdomains") || !first || inList(host, bannedSubdomainList) {
reportUnavailable(true)
host = getRandomHost()
}
ok := false
tH.Balancers.Range(func(strKey string, value *roundrobin.RoundRobin) bool {
if strKey == host {
ok = true
return false
}
return true
})
if ok && viper.GetBool("sni-load-balancer") {
ok = false
}
reportUnavailable(ok)
first = false
return ok
}
for checkHost(host) {
}
return host, bindErr
}
return getUnusedHost()
}
// GetOpenHost returns an open host or a random host if that one is unavailable.
// If load balancing is enabled, it will return the requested domain.
func GetOpenHost(addr string, state *State, sshConn *SSHConnection) (*url.URL, *HTTPHolder) {
getUnusedHost := func() (*url.URL, *HTTPHolder) {
var pH *HTTPHolder
first := true
hostExtension := ""
if viper.GetBool("append-user-to-subdomain") {
hostExtension = viper.GetString("append-user-to-subdomain-separator") + sshConn.SSHConn.User()
}
var username string
var password string
var path string
var bindErr error
if strings.Contains(addr, "@") {
hostParts := strings.SplitN(addr, "@", 2)
addr = hostParts[1]
if viper.GetBool("bind-http-auth") && len(hostParts[0]) > 0 {
authParts := strings.Split(hostParts[0], ":")
if len(authParts) > 0 {
username = authParts[0]
}
if len(authParts) > 1 {
password = authParts[1]
}
}
}
if strings.Contains(addr, "/") {
pathParts := strings.SplitN(addr, "/", 2)
if viper.GetBool("bind-http-path") && len(pathParts[1]) > 0 {
path = fmt.Sprintf("/%s", pathParts[1])
}
addr = pathParts[0]
}
dnsMatch, _, err := verifyDNS(addr, sshConn)
if err != nil && viper.GetBool("debug") {
log.Println("Error looking up txt records for domain:", addr)
}
proposedHost := fmt.Sprintf("%s%s.%s", addr, hostExtension, viper.GetString("domain"))
domainParts := strings.Join(strings.Split(addr, ".")[1:], ".")
if dnsMatch || (viper.GetBool("bind-any-host") && strings.Contains(addr, ".")) || inList(domainParts, strings.FieldsFunc(viper.GetString("bind-hosts"), CommaSplitFields)) {
proposedHost = addr
if proposedHost == fmt.Sprintf(".%s", viper.GetString("domain")) {
proposedHost = viper.GetString("domain")
}
}
if viper.GetBool("bind-root-domain") && proposedHost == fmt.Sprintf(".%s", viper.GetString("domain")) {
proposedHost = viper.GetString("domain")
}
host := strings.ToLower(proposedHost)
getRandomHost := func() string {
return strings.ToLower(RandStringBytesMaskImprSrc(viper.GetInt("bind-random-subdomains-length")) + "." + viper.GetString("domain"))
}
reportUnavailable := func(unavailable bool) {
if first && unavailable {
extra := " Assigning a random subdomain."
if viper.GetBool("force-requested-subdomains") {
extra = ""
bindErr = fmt.Errorf("unable to bind requested subdomain")
}
sshConn.SendMessage(aurora.Sprintf("The subdomain %s is unavailable.%s", aurora.Red(host), extra), true)
}
}
checkHost := func(checkHost string) bool {
if bindErr != nil {
return false
}
if viper.GetBool("bind-random-subdomains") || !first || inList(host, bannedSubdomainList) {
reportUnavailable(true)
host = getRandomHost()
}
var holder *HTTPHolder
ok := false
state.HTTPListeners.Range(func(key string, locationListener *HTTPHolder) bool {
parsedPassword, _ := locationListener.HTTPUrl.User.Password()
if host == locationListener.HTTPUrl.Host && strings.HasPrefix(path, locationListener.HTTPUrl.Path) && username == locationListener.HTTPUrl.User.Username() && password == parsedPassword {
ok = true
holder = locationListener
return false
}
return true
})
if ok && viper.GetBool("http-load-balancer") {
pH = holder
ok = false
}
reportUnavailable(ok)
first = false
return ok
}
for checkHost(host) {
}
if bindErr != nil {
return nil, nil
}
hostUrl := &url.URL{
User: url.UserPassword(username, password),
Host: host,
Path: path,
}
return hostUrl, pH
}
return getUnusedHost()
}
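// Illustrative sketch of the bind request forms GetOpenHost parses (names are
// hypothetical): given addr "user:pass@myapp/admin" and domain "tuns.example",
// the pieces become
//
//	username = "user", password = "pass" // when bind-http-auth is enabled
//	path     = "/admin"                  // when bind-http-path is enabled
//	host     = "myapp.tuns.example"      // or "myapp" itself on a DNS/bind-any-host match
//
// and the returned url.URL carries all three for later HTTP routing.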
// GetOpenAlias returns an open alias or a random one if the requested alias is unavailable.
// If load balancing is enabled, it will return the requested alias.
func GetOpenAlias(addr string, port string, state *State, sshConn *SSHConnection) (string, *AliasHolder) {
getUnusedAlias := func() (string, *AliasHolder) {
var aH *AliasHolder
var bindErr error
first := true
alias := fmt.Sprintf("%s:%s", strings.ToLower(addr), port)
getRandomAlias := func() string {
return fmt.Sprintf("%s:%s", strings.ToLower(RandStringBytesMaskImprSrc(viper.GetInt("bind-random-aliases-length"))), port)
}
reportUnavailable := func(unavailable bool) {
if first && unavailable {
extra := " Assigning a random alias."
if viper.GetBool("force-requested-aliases") {
extra = ""
bindErr = fmt.Errorf("unable to bind requested alias")
}
sshConn.SendMessage(aurora.Sprintf("The alias %s is unavailable.%s", aurora.Red(alias), extra), true)
}
}
checkAlias := func(checkAlias string) bool {
if bindErr != nil {
return false
}
if viper.GetBool("bind-random-aliases") || !first || inList(alias, bannedAliasList) {
reportUnavailable(true)
alias = getRandomAlias()
}
holder, ok := state.AliasListeners.Load(alias)
if ok && viper.GetBool("alias-load-balancer") {
aH = holder
ok = false
}
reportUnavailable(ok)
first = false
return ok
}
for checkAlias(alias) {
}
return alias, aH
}
return getUnusedAlias()
}
// RandStringBytesMaskImprSrc creates a random string of length n
// https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang
func RandStringBytesMaskImprSrc(n int) string {
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
const (
letterIdxBits = 6 // 6 bits to represent a letter index
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
)
var src = mathrand.NewSource(time.Now().UnixNano())
b := make([]byte, n)
// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
if remain == 0 {
cache, remain = src.Int63(), letterIdxMax
}
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
b[i] = letterBytes[idx]
i--
}
cache >>= letterIdxBits
remain--
}
return string(b)
}
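// Worked numbers behind the masking trick above: each src.Int63() call yields
// 63 random bits, consumed letterIdxBits (6) at a time, so one call covers up
// to 63/6 = 10 letter indices before a refill. Masked indices 62 and 63 fall
// outside the 62-character alphabet and are skipped, which keeps the draw over
// letterBytes uniform.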
|
GetSSHConfig
|
webapis-notification.ts
|
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {patchOnProperties} from '../common/utils';
((_global: any) => {
// patch Notification
patchNotification(_global);
function
|
(_global: any) {
const Notification = _global['Notification'];
if (!Notification || !Notification.prototype) {
return;
}
patchOnProperties(Notification.prototype, null);
}
})(typeof window === 'object' && window || typeof self === 'object' && self || global);
|
patchNotification
|
bufferHumanJson.ts
|
/**
* Copyright 2018 Twitter, Inc.
* Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*/
import { bufferToHexEncodedAscii, mapObject } from "sqrl-common";
function friendlyBuffer(buf: Buffer) {
try {
|
}
}
function deepFriendlyMap(obj: any) {
if (typeof obj === "bigint") {
return obj.toString();
} else if (obj instanceof Buffer) {
return friendlyBuffer(obj);
} else if (Array.isArray(obj)) {
return obj.map(deepFriendlyMap);
} else if (typeof obj === "object" && obj !== null) {
return mapObject(obj, deepFriendlyMap);
} else {
return obj;
}
}
/**
* Convert a given buffer into something that is human readable.
* This process is not necessarily reversible and is meant for convenience
* only.
*/
export function bufferHumanJson(msg: Buffer): any {
try {
return JSON.parse(msg.toString("utf-8"));
} catch (err) {
return deepFriendlyMap(msg);
}
}
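// Usage sketch (hypothetical inputs): valid JSON parses straight through,
// while arbitrary bytes fall back to the readable friendlyBuffer rendering.
//
//   bufferHumanJson(Buffer.from('{"a":1}'));    // => { a: 1 }
//   bufferHumanJson(Buffer.from([0xde, 0xad])); // => hex/ascii fallback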
|
const text = buf.toString("utf-8");
return bufferToHexEncodedAscii(text);
} catch (err) {
return `0x${buf.toString("hex")}`;
|
context.rs
|
use std::cell::RefCell;
use std::time;
use crate::config;
pub struct HintInterrupt {
start: time::Instant,
duration: time::Duration,
}
impl fend_core::Interrupt for HintInterrupt {
fn should_interrupt(&self) -> bool {
time::Instant::now().duration_since(self.start) >= self.duration
}
}
impl Default for HintInterrupt {
fn default() -> Self {
Self {
start: time::Instant::now(),
duration: time::Duration::from_millis(20),
}
}
}
#[allow(clippy::module_name_repetitions)]
pub struct InnerContext {
core_ctx: fend_core::Context,
// true if the user typed some partial input, false otherwise
input_typed: bool,
}
impl InnerContext {
pub fn new(config: &config::Config) -> Self {
let mut res = Self {
core_ctx: fend_core::Context::new(),
input_typed: false,
};
if config.coulomb_and_farad {
res.core_ctx.use_coulomb_and_farad();
}
res
}
}
#[derive(Clone)]
pub struct Context<'a> {
ctx: &'a RefCell<InnerContext>,
}
impl<'a> Context<'a> {
pub fn new(ctx: &'a RefCell<InnerContext>) -> Self {
Self { ctx }
}
pub fn
|
(
&self,
line: &str,
int: &impl fend_core::Interrupt,
) -> Result<fend_core::FendResult, String> {
let mut ctx_borrow = self.ctx.borrow_mut();
ctx_borrow.core_ctx.set_random_u32_fn(random_u32);
ctx_borrow.core_ctx.set_output_mode_terminal();
ctx_borrow.input_typed = false;
fend_core::evaluate_with_interrupt(line, &mut ctx_borrow.core_ctx, int)
}
pub fn eval_hint(&self, line: &str) -> fend_core::FendResult {
let mut ctx_borrow = self.ctx.borrow_mut();
ctx_borrow.core_ctx.set_output_mode_terminal();
ctx_borrow.input_typed = !line.is_empty();
let int = HintInterrupt::default();
fend_core::evaluate_preview_with_interrupt(line, &mut ctx_borrow.core_ctx, &int)
}
pub fn get_input_typed(&self) -> bool {
self.ctx.borrow().input_typed
}
}
fn random_u32() -> u32 {
let mut rng = nanorand::WyRand::new();
nanorand::Rng::generate(&mut rng)
}
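// Usage sketch (not part of the original file; `cfg` is an assumed
// config::Config in scope): Context is a cheap Clone handle over a shared
// InnerContext, letting the REPL loop and hint renderer evaluate against the
// same state.
//
//     let inner = RefCell::new(InnerContext::new(&cfg));
//     let ctx = Context::new(&inner);
//     let _hint = ctx.eval_hint("1 + 1");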
|
eval
|
register_ext.rs
|
// SPDX-License-Identifier: MIT
// Copyright (C) 2018-present iced project and contributors
use crate::register::{iced_to_register, register_to_iced, Register};
use wasm_bindgen::prelude::*;
/// [`Register`] enum extension methods
///
/// [`Register`]: enum.Register.html
#[wasm_bindgen]
pub struct RegisterExt;
#[wasm_bindgen]
impl RegisterExt {
/// Gets the base register, eg. `AL`, `AX`, `EAX`, `RAX`, `MM0`, `XMM0`, `YMM0`, `ZMM0`, `ES`
///
|
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.equal(RegisterExt.base(Register.GS), Register.ES);
/// assert.equal(RegisterExt.base(Register.SIL), Register.AL);
/// assert.equal(RegisterExt.base(Register.SP), Register.AX);
/// assert.equal(RegisterExt.base(Register.R13D), Register.EAX);
/// assert.equal(RegisterExt.base(Register.RBP), Register.RAX);
/// assert.equal(RegisterExt.base(Register.MM6), Register.MM0);
/// assert.equal(RegisterExt.base(Register.XMM28), Register.XMM0);
/// assert.equal(RegisterExt.base(Register.YMM12), Register.YMM0);
/// assert.equal(RegisterExt.base(Register.ZMM31), Register.ZMM0);
/// assert.equal(RegisterExt.base(Register.K3), Register.K0);
/// assert.equal(RegisterExt.base(Register.BND1), Register.BND0);
/// assert.equal(RegisterExt.base(Register.ST7), Register.ST0);
/// assert.equal(RegisterExt.base(Register.CR8), Register.CR0);
/// assert.equal(RegisterExt.base(Register.DR6), Register.DR0);
/// assert.equal(RegisterExt.base(Register.TR3), Register.TR0);
/// assert.equal(RegisterExt.base(Register.RIP), Register.EIP);
/// ```
pub fn base(value: Register) -> Register {
iced_to_register(register_to_iced(value).base())
}
/// The register number (index) relative to [`RegisterExt.base()`], eg. 0-15, or 0-31, or if 8-bit GPR, 0-19
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
/// [`RegisterExt.base()`]: #method.base
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.equal(RegisterExt.number(Register.GS), 5);
/// assert.equal(RegisterExt.number(Register.SIL), 10);
/// assert.equal(RegisterExt.number(Register.SP), 4);
/// assert.equal(RegisterExt.number(Register.R13D), 13);
/// assert.equal(RegisterExt.number(Register.RBP), 5);
/// assert.equal(RegisterExt.number(Register.MM6), 6);
/// assert.equal(RegisterExt.number(Register.XMM28), 28);
/// assert.equal(RegisterExt.number(Register.YMM12), 12);
/// assert.equal(RegisterExt.number(Register.ZMM31), 31);
/// assert.equal(RegisterExt.number(Register.K3), 3);
/// assert.equal(RegisterExt.number(Register.BND1), 1);
/// assert.equal(RegisterExt.number(Register.ST7), 7);
/// assert.equal(RegisterExt.number(Register.CR8), 8);
/// assert.equal(RegisterExt.number(Register.DR6), 6);
/// assert.equal(RegisterExt.number(Register.TR3), 3);
/// assert.equal(RegisterExt.number(Register.RIP), 1);
/// ```
pub fn number(value: Register) -> u32 {
register_to_iced(value).number() as u32
}
/// Gets the full register that this one is a part of, eg. `CL`/`CH`/`CX`/`ECX`/`RCX` -> `RCX`, `XMM11`/`YMM11`/`ZMM11` -> `ZMM11`
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.equal(RegisterExt.fullRegister(Register.GS), Register.GS);
/// assert.equal(RegisterExt.fullRegister(Register.SIL), Register.RSI);
/// assert.equal(RegisterExt.fullRegister(Register.SP), Register.RSP);
/// assert.equal(RegisterExt.fullRegister(Register.R13D), Register.R13);
/// assert.equal(RegisterExt.fullRegister(Register.RBP), Register.RBP);
/// assert.equal(RegisterExt.fullRegister(Register.MM6), Register.MM6);
/// assert.equal(RegisterExt.fullRegister(Register.XMM10), Register.ZMM10);
/// assert.equal(RegisterExt.fullRegister(Register.YMM10), Register.ZMM10);
/// assert.equal(RegisterExt.fullRegister(Register.ZMM10), Register.ZMM10);
/// assert.equal(RegisterExt.fullRegister(Register.K3), Register.K3);
/// assert.equal(RegisterExt.fullRegister(Register.BND1), Register.BND1);
/// assert.equal(RegisterExt.fullRegister(Register.ST7), Register.ST7);
/// assert.equal(RegisterExt.fullRegister(Register.CR8), Register.CR8);
/// assert.equal(RegisterExt.fullRegister(Register.DR6), Register.DR6);
/// assert.equal(RegisterExt.fullRegister(Register.TR3), Register.TR3);
/// assert.equal(RegisterExt.fullRegister(Register.RIP), Register.RIP);
/// ```
#[wasm_bindgen(js_name = "fullRegister")]
pub fn full_register(value: Register) -> Register {
iced_to_register(register_to_iced(value).full_register())
}
/// Gets the full register that this one is a part of, except if it's a GPR in which case the 32-bit register is returned,
/// eg. `CL`/`CH`/`CX`/`ECX`/`RCX` -> `ECX`, `XMM11`/`YMM11`/`ZMM11` -> `ZMM11`
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.equal(RegisterExt.fullRegister32(Register.GS), Register.GS);
/// assert.equal(RegisterExt.fullRegister32(Register.SIL), Register.ESI);
/// assert.equal(RegisterExt.fullRegister32(Register.SP), Register.ESP);
/// assert.equal(RegisterExt.fullRegister32(Register.R13D), Register.R13D);
/// assert.equal(RegisterExt.fullRegister32(Register.RBP), Register.EBP);
/// assert.equal(RegisterExt.fullRegister32(Register.MM6), Register.MM6);
/// assert.equal(RegisterExt.fullRegister32(Register.XMM10), Register.ZMM10);
/// assert.equal(RegisterExt.fullRegister32(Register.YMM10), Register.ZMM10);
/// assert.equal(RegisterExt.fullRegister32(Register.ZMM10), Register.ZMM10);
/// assert.equal(RegisterExt.fullRegister32(Register.K3), Register.K3);
/// assert.equal(RegisterExt.fullRegister32(Register.BND1), Register.BND1);
/// assert.equal(RegisterExt.fullRegister32(Register.ST7), Register.ST7);
/// assert.equal(RegisterExt.fullRegister32(Register.CR8), Register.CR8);
/// assert.equal(RegisterExt.fullRegister32(Register.DR6), Register.DR6);
/// assert.equal(RegisterExt.fullRegister32(Register.TR3), Register.TR3);
/// assert.equal(RegisterExt.fullRegister32(Register.RIP), Register.RIP);
/// ```
#[wasm_bindgen(js_name = "fullRegister32")]
pub fn full_register32(value: Register) -> Register {
iced_to_register(register_to_iced(value).full_register32())
}
/// Gets the size of the register in bytes
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.equal(RegisterExt.size(Register.GS), 2);
/// assert.equal(RegisterExt.size(Register.SIL), 1);
/// assert.equal(RegisterExt.size(Register.SP), 2);
/// assert.equal(RegisterExt.size(Register.R13D), 4);
/// assert.equal(RegisterExt.size(Register.RBP), 8);
/// assert.equal(RegisterExt.size(Register.MM6), 8);
/// assert.equal(RegisterExt.size(Register.XMM10), 16);
/// assert.equal(RegisterExt.size(Register.YMM10), 32);
/// assert.equal(RegisterExt.size(Register.ZMM10), 64);
/// assert.equal(RegisterExt.size(Register.K3), 8);
/// assert.equal(RegisterExt.size(Register.BND1), 16);
/// assert.equal(RegisterExt.size(Register.ST7), 10);
/// assert.equal(RegisterExt.size(Register.CR8), 8);
/// assert.equal(RegisterExt.size(Register.DR6), 8);
/// assert.equal(RegisterExt.size(Register.TR3), 4);
/// assert.equal(RegisterExt.size(Register.RIP), 8);
/// ```
pub fn size(value: Register) -> u32 {
register_to_iced(value).size() as u32
}
}
#[wasm_bindgen]
impl RegisterExt {
/// Checks if it's a segment register (`ES`, `CS`, `SS`, `DS`, `FS`, `GS`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(RegisterExt.isSegmentRegister(Register.GS));
/// assert.ok(!RegisterExt.isSegmentRegister(Register.RCX));
/// ```
#[wasm_bindgen(js_name = "isSegmentRegister")]
pub fn is_segment_register(value: Register) -> bool {
register_to_iced(value).is_segment_register()
}
/// Checks if it's a general purpose register (`AL`-`R15L`, `AX`-`R15W`, `EAX`-`R15D`, `RAX`-`R15`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isGPR(Register.GS));
/// assert.ok(RegisterExt.isGPR(Register.CH));
/// assert.ok(RegisterExt.isGPR(Register.DX));
/// assert.ok(RegisterExt.isGPR(Register.R13D));
/// assert.ok(RegisterExt.isGPR(Register.RSP));
/// assert.ok(!RegisterExt.isGPR(Register.XMM0));
/// ```
#[wasm_bindgen(js_name = "isGPR")]
pub fn is_gpr(value: Register) -> bool {
register_to_iced(value).is_gpr()
}
/// Checks if it's an 8-bit general purpose register (`AL`-`R15L`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isGPR8(Register.GS));
/// assert.ok(RegisterExt.isGPR8(Register.CH));
/// assert.ok(!RegisterExt.isGPR8(Register.DX));
/// assert.ok(!RegisterExt.isGPR8(Register.R13D));
/// assert.ok(!RegisterExt.isGPR8(Register.RSP));
/// assert.ok(!RegisterExt.isGPR8(Register.XMM0));
/// ```
#[wasm_bindgen(js_name = "isGPR8")]
pub fn is_gpr8(value: Register) -> bool {
register_to_iced(value).is_gpr8()
}
/// Checks if it's a 16-bit general purpose register (`AX`-`R15W`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isGPR16(Register.GS));
/// assert.ok(!RegisterExt.isGPR16(Register.CH));
/// assert.ok(RegisterExt.isGPR16(Register.DX));
/// assert.ok(!RegisterExt.isGPR16(Register.R13D));
/// assert.ok(!RegisterExt.isGPR16(Register.RSP));
/// assert.ok(!RegisterExt.isGPR16(Register.XMM0));
/// ```
#[wasm_bindgen(js_name = "isGPR16")]
pub fn is_gpr16(value: Register) -> bool {
register_to_iced(value).is_gpr16()
}
/// Checks if it's a 32-bit general purpose register (`EAX`-`R15D`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isGPR32(Register.GS));
/// assert.ok(!RegisterExt.isGPR32(Register.CH));
/// assert.ok(!RegisterExt.isGPR32(Register.DX));
/// assert.ok(RegisterExt.isGPR32(Register.R13D));
/// assert.ok(!RegisterExt.isGPR32(Register.RSP));
/// assert.ok(!RegisterExt.isGPR32(Register.XMM0));
/// ```
#[wasm_bindgen(js_name = "isGPR32")]
pub fn is_gpr32(value: Register) -> bool {
register_to_iced(value).is_gpr32()
}
/// Checks if it's a 64-bit general purpose register (`RAX`-`R15`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isGPR64(Register.GS));
/// assert.ok(!RegisterExt.isGPR64(Register.CH));
/// assert.ok(!RegisterExt.isGPR64(Register.DX));
/// assert.ok(!RegisterExt.isGPR64(Register.R13D));
/// assert.ok(RegisterExt.isGPR64(Register.RSP));
/// assert.ok(!RegisterExt.isGPR64(Register.XMM0));
/// ```
#[wasm_bindgen(js_name = "isGPR64")]
pub fn is_gpr64(value: Register) -> bool {
register_to_iced(value).is_gpr64()
}
/// Checks if it's a 128-bit vector register (`XMM0`-`XMM31`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isXMM(Register.R13D));
/// assert.ok(!RegisterExt.isXMM(Register.RSP));
/// assert.ok(RegisterExt.isXMM(Register.XMM0));
/// assert.ok(!RegisterExt.isXMM(Register.YMM0));
/// assert.ok(!RegisterExt.isXMM(Register.ZMM0));
/// ```
#[wasm_bindgen(js_name = "isXMM")]
pub fn is_xmm(value: Register) -> bool {
register_to_iced(value).is_xmm()
}
/// Checks if it's a 256-bit vector register (`YMM0`-`YMM31`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isYMM(Register.R13D));
/// assert.ok(!RegisterExt.isYMM(Register.RSP));
/// assert.ok(!RegisterExt.isYMM(Register.XMM0));
/// assert.ok(RegisterExt.isYMM(Register.YMM0));
/// assert.ok(!RegisterExt.isYMM(Register.ZMM0));
/// ```
#[wasm_bindgen(js_name = "isYMM")]
pub fn is_ymm(value: Register) -> bool {
register_to_iced(value).is_ymm()
}
/// Checks if it's a 512-bit vector register (`ZMM0`-`ZMM31`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isZMM(Register.R13D));
/// assert.ok(!RegisterExt.isZMM(Register.RSP));
/// assert.ok(!RegisterExt.isZMM(Register.XMM0));
/// assert.ok(!RegisterExt.isZMM(Register.YMM0));
/// assert.ok(RegisterExt.isZMM(Register.ZMM0));
/// ```
#[wasm_bindgen(js_name = "isZMM")]
pub fn is_zmm(value: Register) -> bool {
register_to_iced(value).is_zmm()
}
/// Checks if it's an `XMM`, `YMM` or `ZMM` register
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isVectorRegister(Register.R13D));
/// assert.ok(!RegisterExt.isVectorRegister(Register.RSP));
/// assert.ok(RegisterExt.isVectorRegister(Register.XMM0));
/// assert.ok(RegisterExt.isVectorRegister(Register.YMM0));
/// assert.ok(RegisterExt.isVectorRegister(Register.ZMM0));
/// ```
#[wasm_bindgen(js_name = "isVectorRegister")]
pub fn is_vector_register(value: Register) -> bool {
register_to_iced(value).is_vector_register()
}
/// Checks if it's `EIP`/`RIP`
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(RegisterExt.isIP(Register.EIP));
/// assert.ok(RegisterExt.isIP(Register.RIP));
/// ```
#[wasm_bindgen(js_name = "isIP")]
pub fn is_ip(value: Register) -> bool {
register_to_iced(value).is_ip()
}
/// Checks if it's an opmask register (`K0`-`K7`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isK(Register.R13D));
/// assert.ok(RegisterExt.isK(Register.K3));
/// ```
#[wasm_bindgen(js_name = "isK")]
pub fn is_k(value: Register) -> bool {
register_to_iced(value).is_k()
}
/// Checks if it's a control register (`CR0`-`CR15`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isCR(Register.R13D));
/// assert.ok(RegisterExt.isCR(Register.CR3));
/// ```
#[wasm_bindgen(js_name = "isCR")]
pub fn is_cr(value: Register) -> bool {
register_to_iced(value).is_cr()
}
/// Checks if it's a debug register (`DR0`-`DR15`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isDR(Register.R13D));
/// assert.ok(RegisterExt.isDR(Register.DR3));
/// ```
#[wasm_bindgen(js_name = "isDR")]
pub fn is_dr(value: Register) -> bool {
register_to_iced(value).is_dr()
}
/// Checks if it's a test register (`TR0`-`TR7`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isTR(Register.R13D));
/// assert.ok(RegisterExt.isTR(Register.TR3));
/// ```
#[wasm_bindgen(js_name = "isTR")]
pub fn is_tr(value: Register) -> bool {
register_to_iced(value).is_tr()
}
/// Checks if it's an FPU stack register (`ST0`-`ST7`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isST(Register.R13D));
/// assert.ok(RegisterExt.isST(Register.ST3));
/// ```
#[wasm_bindgen(js_name = "isST")]
pub fn is_st(value: Register) -> bool {
register_to_iced(value).is_st()
}
/// Checks if it's a bound register (`BND0`-`BND3`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isBND(Register.R13D));
/// assert.ok(RegisterExt.isBND(Register.BND3));
/// ```
#[wasm_bindgen(js_name = "isBND")]
pub fn is_bnd(value: Register) -> bool {
register_to_iced(value).is_bnd()
}
/// Checks if it's an MMX register (`MM0`-`MM7`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isMM(Register.R13D));
/// assert.ok(RegisterExt.isMM(Register.MM3));
/// ```
#[wasm_bindgen(js_name = "isMM")]
pub fn is_mm(value: Register) -> bool {
register_to_iced(value).is_mm()
}
/// Checks if it's a tile register (`TMM0`-`TMM7`)
///
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
/// [`Register`]: enum.Register.html
///
/// # Examples
///
/// ```js
/// const assert = require("assert").strict;
/// const { Register, RegisterExt } = require("iced-x86");
///
/// assert.ok(!RegisterExt.isTMM(Register.R13D));
/// assert.ok(RegisterExt.isTMM(Register.TMM3));
/// ```
#[wasm_bindgen(js_name = "isTMM")]
pub fn is_tmm(value: Register) -> bool {
register_to_iced(value).is_tmm()
}
}
|
/// # Arguments
///
/// - `value`: A [`Register`] enum value
///
|
sql.rs
|
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Integration tests for SQL functionality.
//!
//! Nearly all tests for SQL behavior should be sqllogictest or testdrive
//! scripts. The tests here are simply too complicated to be easily expressed
//! in testdrive, e.g., because they depend on the current time.
use std::error::Error;
use std::io::Read;
use std::io::Write;
use std::net::Shutdown;
use std::net::TcpListener;
use std::sync::Arc;
use std::sync::Mutex;
use std::thread;
use std::time::{Duration, Instant};
use chrono::{DateTime, Utc};
use mz_ore::now::NowFn;
use mz_ore::now::NOW_ZERO;
use mz_ore::now::SYSTEM_TIME;
use postgres::Row;
use regex::Regex;
use tracing::info;
use mz_ore::assert_contains;
use crate::util::{MzTimestamp, PostgresErrorExt, KAFKA_ADDRS};
pub mod util;
#[test]
fn test_no_block() -> Result<(), anyhow::Error> {
mz_ore::test::init_logging();
// This is better than relying on CI to time out, because an actual failure
// (as opposed to a CI timeout) causes `services.log` to be uploaded.
mz_ore::test::timeout(Duration::from_secs(30), || {
// Create a listener that will simulate a slow Confluent Schema Registry.
info!("test_no_block: creating listener");
let listener = TcpListener::bind("localhost:0")?;
let listener_port = listener.local_addr()?.port();
info!("test_no_block: starting server");
let server = util::start_server(util::Config::default())?;
info!("test_no_block: connecting to server");
let mut client = server.connect(postgres::NoTls)?;
info!("test_no_block: spawning thread");
let slow_thread = thread::spawn(move || {
info!("test_no_block: in thread; executing create source");
let result = client.batch_execute(&format!(
"CREATE SOURCE foo \
FROM KAFKA BROKER '{}' TOPIC 'foo' \
FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY 'http://localhost:{}'",
&*KAFKA_ADDRS, listener_port,
));
info!("test_no_block: in thread; create source done");
result
});
// Wait for materialized to contact the schema registry, which indicates
// the coordinator is processing the CREATE SOURCE command. It will be
// unable to complete the query until we respond.
info!("test_no_block: accepting fake schema registry connection");
let (mut stream, _) = listener.accept()?;
// Verify that the coordinator can still process other requests from other
// sessions.
info!("test_no_block: connecting to server again");
let mut client = server.connect(postgres::NoTls)?;
info!("test_no_block: executing query");
let answer: i32 = client.query_one("SELECT 1 + 1", &[])?.get(0);
assert_eq!(answer, 2);
info!("test_no_block: reading the HTTP request");
let mut buf = vec![0; 1024];
let mut input = vec![];
// The HTTP request will end in two CRLFs, so detect that to know we've finished reading.
while {
let len = input.len();
len < 4 || &input[len - 4..] != b"\r\n\r\n"
} {
let len_read = stream.read(&mut buf).unwrap();
assert!(len_read > 0);
input.extend_from_slice(&buf[0..len_read]);
}
// Return an error to the coordinator, so that we can shutdown cleanly.
info!("test_no_block: writing fake schema registry error");
write!(stream, "HTTP/1.1 503 Service Unavailable\r\n\r\n")?;
info!("test_no_block: shutting down fake schema registry connection");
stream.shutdown(Shutdown::Write).unwrap();
// Verify that the schema registry error was returned to the client, for
// good measure.
info!("test_no_block: joining thread");
let slow_res = slow_thread.join().unwrap();
assert_contains!(slow_res.unwrap_err().to_string(), "server error 503");
info!("test_no_block: returning");
Ok(())
})
}
#[test]
fn test_time() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let server = util::start_server(util::Config::default())?;
let mut client = server.connect(postgres::NoTls)?;
// Confirm that `now()` and `current_timestamp()` both return a
// DateTime<Utc>, but don't assert specific times.
let row = client.query_one("SELECT now(), current_timestamp()", &[])?;
let _ = row.get::<_, DateTime<Utc>>(0);
let _ = row.get::<_, DateTime<Utc>>(1);
// Confirm calls to now() return the same DateTime<Utc> both inside and
// outside of subqueries.
let row = client.query_one("SELECT now(), (SELECT now())", &[])?;
assert_eq!(
row.get::<_, DateTime<Utc>>(0),
row.get::<_, DateTime<Utc>>(1)
);
// Ensure that EXPLAIN selects a timestamp for `now()` and
// `current_timestamp()`, though we don't care what the timestamp is.
let rows = client.query("EXPLAIN PLAN FOR SELECT now(), current_timestamp()", &[])?;
assert_eq!(1, rows.len());
// Test that `mz_sleep` causes a delay of at least the appropriate time.
let start = Instant::now();
client.batch_execute("SELECT mz_internal.mz_sleep(0.3)")?;
let elapsed = start.elapsed();
assert!(
elapsed >= Duration::from_millis(300),
"start.elapsed() = {:?}",
elapsed
);
Ok(())
}
#[test]
fn test_tail_consolidation() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let config = util::Config::default().workers(2);
let server = util::start_server(config)?;
let mut client_writes = server.connect(postgres::NoTls)?;
let mut client_reads = server.connect(postgres::NoTls)?;
client_writes.batch_execute("CREATE TABLE t (data text)")?;
client_reads.batch_execute(
"BEGIN;
DECLARE c CURSOR FOR TAIL t;",
)?;
let data = format!("line {}", 42);
client_writes.execute(
"INSERT INTO t VALUES ($1), ($2), ($3)",
&[&data, &data, &data],
)?;
let row = client_reads.query_one("FETCH ALL c", &[])?;
assert_eq!(row.get::<_, i64>("mz_diff"), 3);
assert_eq!(row.get::<_, String>("data"), data);
Ok(())
}
#[test]
fn test_tail_negative_diffs() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let config = util::Config::default().workers(2);
let server = util::start_server(config)?;
let mut client_writes = server.connect(postgres::NoTls)?;
let mut client_reads = server.connect(postgres::NoTls)?;
client_writes.batch_execute("CREATE TABLE t (data text)")?;
client_writes.batch_execute(
"CREATE MATERIALIZED VIEW counts AS SELECT data AS key, COUNT(data) AS count FROM t GROUP BY data",
)?;
client_reads.batch_execute(
"BEGIN;
DECLARE c CURSOR FOR TAIL counts;",
)?;
let data = format!("line {}", 42);
client_writes.execute("INSERT INTO t VALUES ($1)", &[&data])?;
let row = client_reads.query_one("FETCH ALL c", &[])?;
assert_eq!(row.get::<_, i64>("mz_diff"), 1);
assert_eq!(row.get::<_, String>("key"), data);
assert_eq!(row.get::<_, i64>("count"), 1);
// send another row with the same key; this will retract the previous
// count and emit an updated count
let data = format!("line {}", 42);
client_writes.execute("INSERT INTO t VALUES ($1)", &[&data])?;
let rows = client_reads.query("FETCH ALL c", &[])?;
let mut rows = rows.iter();
let row = rows.next().expect("missing result");
assert_eq!(row.get::<_, i64>("mz_diff"), -1);
assert_eq!(row.get::<_, String>("key"), data);
assert_eq!(row.get::<_, i64>("count"), 1);
let row = rows.next().expect("missing result");
assert_eq!(row.get::<_, i64>("mz_diff"), 1);
assert_eq!(row.get::<_, String>("key"), data);
assert_eq!(row.get::<_, i64>("count"), 2);
Ok(())
}
#[test]
fn test_tail_basic() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
// Set the timestamp to zero for deterministic initial timestamps.
let nowfn = Arc::new(Mutex::new(NOW_ZERO.clone()));
let now = {
let nowfn = Arc::clone(&nowfn);
NowFn::from(move || (nowfn.lock().unwrap())())
};
let config = util::Config::default().workers(2).with_now(now);
let server = util::start_server(config)?;
let mut client_writes = server.connect(postgres::NoTls)?;
let mut client_reads = server.connect(postgres::NoTls)?;
client_writes.batch_execute("CREATE TABLE t (data text)")?;
client_writes.batch_execute("CREATE DEFAULT INDEX t_primary_idx ON t")?;
// Now that the index (and its since) are initialized to 0, we can resume using
// system time. Do a read to bump the oracle's state so it will read from the
// system clock during inserts below.
*nowfn.lock().unwrap() = SYSTEM_TIME.clone();
client_writes.batch_execute("SELECT * FROM t")?;
client_reads.batch_execute(
"BEGIN;
DECLARE c CURSOR FOR TAIL t;",
)?;
// Locks the timestamp of the TAIL to before any of the following INSERTs, which is required
// for the mz_timestamp column to be accurate
let _ = client_reads.query_one("FETCH 0 c", &[]);
let mut events = vec![];
for i in 1..=3 {
let data = format!("line {}", i);
client_writes.execute("INSERT INTO t VALUES ($1)", &[&data])?;
let row = client_reads.query_one("FETCH ALL c", &[])?;
assert_eq!(row.get::<_, i64>("mz_diff"), 1);
assert_eq!(row.get::<_, String>("data"), data);
events.push((row.get::<_, MzTimestamp>("mz_timestamp").0, data));
if i > 1 {
// write timestamps should all increase
assert!(events[i - 1].0 > events[i - 2].0);
}
}
// Now tail without a snapshot as of each timestamp, verifying that when we do
// so we only see events that occur as of or later than that timestamp.
for (ts, _) in &events {
client_reads.batch_execute(&*format!(
"COMMIT; BEGIN;
DECLARE c CURSOR FOR TAIL t WITH (SNAPSHOT = false) AS OF {}",
ts - 1
))?;
// Skip by the things we won't be able to see.
for (_, expected) in events.iter().skip_while(|(inner_ts, _)| inner_ts < ts) {
let actual = client_reads.query_one("FETCH c", &[])?;
assert_eq!(actual.get::<_, String>("data"), *expected);
}
}
// Now tail with a snapshot as of each timestamp. We should see a batch of
// updates all at the tailed timestamp, and then updates afterward.
for (ts, _) in &events {
client_reads.batch_execute(&*format!(
"COMMIT; BEGIN;
DECLARE c CURSOR FOR TAIL t AS OF {}",
ts - 1
))?;
for (mut expected_ts, expected_data) in events.iter() {
if expected_ts < ts - 1 {
// If the thing we initially got was before the timestamp, it should have gotten
// fast-forwarded up to the timestamp.
expected_ts = ts - 1;
}
let actual = client_reads.query_one("FETCH c", &[])?;
assert_eq!(actual.get::<_, String>("data"), *expected_data);
assert_eq!(actual.get::<_, MzTimestamp>("mz_timestamp").0, expected_ts);
}
}
// Aggressively compact the data in the index, then tail an unmaterialized
// view derived from the index. This previously selected an invalid
// `AS OF` timestamp (#5391).
client_writes
.batch_execute("ALTER INDEX t_primary_idx SET (logical_compaction_window = '1ms')")?;
client_writes.batch_execute("CREATE VIEW v AS SELECT * FROM t")?;
client_reads.batch_execute(
"COMMIT; BEGIN;
DECLARE c CURSOR FOR TAIL v;",
)?;
let rows = client_reads.query("FETCH ALL c", &[])?;
assert_eq!(rows.len(), 3);
for i in 0..3 {
assert_eq!(rows[i].get::<_, i64>("mz_diff"), 1);
assert_eq!(rows[i].get::<_, String>("data"), format!("line {}", i + 1));
}
// Wait until compaction kicks in and we get an error on trying to read from the cursor.
let err = loop {
client_reads.batch_execute("COMMIT; BEGIN; DECLARE c CURSOR FOR TAIL v AS OF 1")?;
if let Err(err) = client_reads.query("FETCH ALL c", &[]) {
break err;
}
};
assert!(err
.unwrap_db_error()
.message()
.starts_with("Timestamp (1) is not valid for all inputs"));
Ok(())
}
/// Test the done messages by inserting a single row and waiting to
/// observe it. Since TAIL always sends a progressed message at the end of its
/// batches and we won't yet insert a second row, we know that if we've seen a
/// data row we will also see one progressed message.
#[test]
fn test_tail_progress() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let config = util::Config::default().workers(2);
let server = util::start_server(config)?;
let mut client_writes = server.connect(postgres::NoTls)?;
let mut client_reads = server.connect(postgres::NoTls)?;
client_writes.batch_execute("CREATE TABLE t1 (data text)")?;
client_reads.batch_execute(
"COMMIT; BEGIN;
DECLARE c1 CURSOR FOR TAIL t1 WITH (PROGRESS);",
)?;
#[derive(PartialEq)]
enum State {
WaitingForData,
WaitingForProgress(MzTimestamp),
Done,
}
for i in 1..=3 {
let data = format!("line {}", i);
client_writes.execute("INSERT INTO t1 VALUES ($1)", &[&data])?;
// We have to try several times. It might be that the FETCH gets
// a batch that only contains continuous progress statements, without
// any data. We retry until we get the batch that has the data, and
// then verify that it also has a progress statement.
let mut state = State::WaitingForData;
while state != State::Done {
let rows = client_reads.query("FETCH ALL c1", &[])?;
let rows = rows.iter();
// find the data row in the sea of progress rows
// remove progress statements that occurred before our data
let skip_progress = state == State::WaitingForData;
let mut rows = rows
.skip_while(move |row| skip_progress && row.try_get::<_, String>("data").is_err());
if state == State::WaitingForData {
// this must be the data row
let data_row = rows.next();
let data_row = match data_row {
Some(data_row) => data_row,
None => continue, //retry
};
assert_eq!(data_row.get::<_, bool>("mz_progressed"), false);
assert_eq!(data_row.get::<_, i64>("mz_diff"), 1);
assert_eq!(data_row.get::<_, String>("data"), data);
let data_ts: MzTimestamp = data_row.get("mz_timestamp");
state = State::WaitingForProgress(data_ts);
}
if let State::WaitingForProgress(data_ts) = &state {
let mut num_progress_rows = 0;
for progress_row in rows {
assert_eq!(progress_row.get::<_, bool>("mz_progressed"), true);
assert_eq!(progress_row.get::<_, Option<i64>>("mz_diff"), None);
assert_eq!(progress_row.get::<_, Option<String>>("data"), None);
let progress_ts: MzTimestamp = progress_row.get("mz_timestamp");
assert!(data_ts < &progress_ts);
num_progress_rows += 1;
}
if num_progress_rows > 0 {
state = State::Done;
}
}
}
}
Ok(())
}
// Verifies that tailing non-nullable columns with progress information
// turns them into nullable columns. See #6304.
#[test]
fn test_tail_progress_non_nullable_columns() -> Result<(), Box<dyn Error>>
|
/// Verifies that we get continuous progress messages, regardless of if we
/// receive data or not.
#[test]
fn test_tail_continuous_progress() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let config = util::Config::default().workers(2);
let server = util::start_server(config)?;
let mut client_writes = server.connect(postgres::NoTls)?;
let mut client_reads = server.connect(postgres::NoTls)?;
client_writes.batch_execute("CREATE TABLE t1 (data text)")?;
client_reads.batch_execute(
"COMMIT; BEGIN;
DECLARE c1 CURSOR FOR TAIL t1 WITH (PROGRESS);",
)?;
let mut last_ts = MzTimestamp(u64::MIN);
let mut verify_rows = move |rows: Vec<Row>| -> (usize, usize) {
let mut num_data_rows = 0;
let mut num_progress_rows = 0;
for row in rows {
let diff = row.get::<_, Option<i64>>("mz_diff");
match diff {
Some(diff) => {
num_data_rows += 1;
assert_eq!(diff, 1);
assert_eq!(row.get::<_, bool>("mz_progressed"), false);
let data = row.get::<_, Option<String>>("data");
assert!(data.is_some());
}
None => {
num_progress_rows += 1;
assert_eq!(row.get::<_, bool>("mz_progressed"), true);
assert_eq!(row.get::<_, Option<String>>("data"), None);
}
}
let ts: MzTimestamp = row.get("mz_timestamp");
assert!(last_ts <= ts);
last_ts = ts;
}
(num_data_rows, num_progress_rows)
};
// make sure we see progress without any data ever being produced
loop {
let rows = client_reads.query("FETCH ALL c1", &[])?;
let (num_data_rows, num_progress_rows) = verify_rows(rows);
assert_eq!(num_data_rows, 0);
if num_progress_rows > 0 {
break;
}
}
client_writes.execute("INSERT INTO t1 VALUES ($1)", &[&"hello".to_owned()])?;
// fetch away the data message, plus maybe some progress messages
let mut num_data_rows = 0;
let mut num_progress_rows = 0;
while num_data_rows == 0 || num_progress_rows == 0 {
let rows = client_reads.query("FETCH ALL c1", &[])?;
let (current_num_data_rows, current_num_progress_rows) = verify_rows(rows);
num_data_rows += current_num_data_rows;
num_progress_rows += current_num_progress_rows;
}
// Try and read some progress messages. The normal update interval is
// 1s, so only wait for two updates. Otherwise this would run for too long.
for _i in 1..=2 {
let rows = client_reads.query("FETCH ALL c1", &[])?;
let (num_data_rows, num_progress_rows) = verify_rows(rows);
assert_eq!(num_data_rows, 0);
assert!(num_progress_rows > 0);
}
Ok(())
}
#[test]
fn test_tail_fetch_timeout() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let config = util::Config::default().workers(2);
let server = util::start_server(config)?;
let mut client = server.connect(postgres::NoTls)?;
client.batch_execute("CREATE TABLE t (i INT8)")?;
client.batch_execute("INSERT INTO t VALUES (1), (2), (3);")?;
client.batch_execute(
"BEGIN;
DECLARE c CURSOR FOR TAIL t;",
)?;
let expected: Vec<i64> = vec![1, 2, 3];
let mut expected_iter = expected.iter();
let mut next = expected_iter.next();
// Test 0s timeouts.
while let Some(expect) = next {
let rows = client.query("FETCH c WITH (TIMEOUT = '0s')", &[])?;
// It is fine for there to be no rows ready yet. Immediately try again because
// they should be ready soon.
if rows.len() != 1 {
continue;
}
assert_eq!(rows[0].get::<_, i64>(2), *expect);
next = expected_iter.next();
}
// Test a 1s timeout and make sure we waited for at least that long.
let before = Instant::now();
let rows = client.query("FETCH c WITH (TIMEOUT = '1s')", &[])?;
let duration = before.elapsed();
assert_eq!(rows.len(), 0);
// Make sure we waited at least 1s but also not too long.
assert!(duration >= Duration::from_secs(1));
assert!(duration < Duration::from_secs(10));
// Make a new cursor. Try to fetch more rows from it than exist. Verify that
// we got all the rows we expect and also waited for at least the timeout
// duration. Cursor may take a moment to be ready, so do it in a loop.
client.batch_execute(
"COMMIT; BEGIN;
DECLARE c CURSOR FOR TAIL t",
)?;
loop {
let before = Instant::now();
let rows = client.query("FETCH 4 c WITH (TIMEOUT = '1s')", &[])?;
let duration = before.elapsed();
if rows.len() != 0 {
assert_eq!(rows.len(), expected.len());
assert!(duration >= Duration::from_secs(1));
assert!(duration < Duration::from_secs(10));
for i in 0..expected.len() {
assert_eq!(rows[i].get::<_, i64>(2), expected[i])
}
break;
}
}
// Another fetch should return nothing.
let rows = client.query("FETCH c WITH (TIMEOUT = '0s')", &[])?;
assert_eq!(rows.len(), 0);
// Make a third cursor. Fetch should return immediately if there are enough
// rows, even with a really long timeout.
//
// Regression test for #6307
client.batch_execute(
"COMMIT; BEGIN;
DECLARE c CURSOR FOR TAIL t",
)?;
let before = Instant::now();
// NB: This timeout is chosen such that the test will time out if the bad
// behavior occurs.
let rows = client.query("FETCH 3 c WITH (TIMEOUT = '1h')", &[])?;
let duration = before.elapsed();
assert_eq!(rows.len(), expected.len());
assert!(duration < Duration::from_secs(10));
for i in 0..expected.len() {
assert_eq!(rows[i].get::<_, i64>(2), expected[i])
}
Ok(())
}
#[test]
fn test_tail_fetch_wait() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let config = util::Config::default().workers(2);
let server = util::start_server(config)?;
let mut client = server.connect(postgres::NoTls)?;
client.batch_execute("CREATE TABLE t (i INT8)")?;
client.batch_execute("INSERT INTO t VALUES (1), (2), (3)")?;
client.batch_execute(
"BEGIN;
DECLARE c CURSOR FOR TAIL t;",
)?;
let expected: Vec<i64> = vec![1, 2, 3];
let mut expected_iter = expected.iter();
let mut next = expected_iter.next();
while let Some(expect) = next {
// FETCH with no timeout will wait for at least 1 result.
let rows = client.query("FETCH c", &[])?;
assert_eq!(rows.len(), 1);
assert_eq!(rows[0].get::<_, i64>(2), *expect);
next = expected_iter.next();
}
// Try again with FETCH ALL. ALL only guarantees that all available rows will
// be returned, but it's up to the system to decide what is available. This
// means that we could still get only one row per request, and we won't know
// how many rows will come back otherwise.
client.batch_execute(
"COMMIT; BEGIN;
DECLARE c CURSOR FOR TAIL t;",
)?;
let mut expected_iter = expected.iter().peekable();
while expected_iter.peek().is_some() {
let rows = client.query("FETCH ALL c", &[])?;
assert!(rows.len() > 0);
for row in rows {
let next = expected_iter.next().unwrap();
assert_eq!(*next, row.get::<_, i64>(2));
}
}
// Verify that the wait only happens for TAIL. A SELECT with 0 rows should not
// block.
client.batch_execute("COMMIT")?;
client.batch_execute("CREATE TABLE empty ()")?;
client.batch_execute(
"BEGIN;
DECLARE c CURSOR FOR SELECT * FROM empty;",
)?;
let rows = client.query("FETCH c", &[])?;
assert_eq!(rows.len(), 0);
Ok(())
}
#[test]
fn test_tail_empty_upper_frontier() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let config = util::Config::default();
let server = util::start_server(config)?;
let mut client = server.connect(postgres::NoTls)?;
client.batch_execute("CREATE MATERIALIZED VIEW foo AS VALUES (1), (2), (3);")?;
let tail = client.query("TAIL foo WITH (SNAPSHOT = false)", &[])?;
assert_eq!(0, tail.len());
let tail = client.query("TAIL foo WITH (SNAPSHOT)", &[])?;
assert_eq!(3, tail.len());
Ok(())
}
// Tests that a client that launches a non-terminating TAIL and disconnects
// does not keep the server alive forever.
#[test]
fn test_tail_shutdown() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let server = util::start_server(util::Config::default())?;
// We have to use the async PostgreSQL client so that we can ungracefully
// abort the connection task.
// See: https://github.com/sfackler/rust-postgres/issues/725
server.runtime.block_on(async {
let (client, conn_task) = server.connect_async(tokio_postgres::NoTls).await?;
// Create a table with no data that we can TAIL. This is the simplest
// way to cause a TAIL to never terminate.
client.batch_execute("CREATE TABLE t ()").await?;
// Launch the ill-fated tail.
client.copy_out("COPY (TAIL t) TO STDOUT").await?;
// Un-gracefully abort the connection.
conn_task.abort();
// Need to await `conn_task` to actually deliver the `abort`. We don't
// care about the result though (it's probably `JoinError` with `is_cancelled` being true).
let _ = conn_task.await;
Ok::<_, Box<dyn Error>>(())
})?;
// Dropping the server will initiate a graceful shutdown. We previously had
// a bug where the server would fail to notice that the client running `TAIL
// v` had disconnected, and would hang forever waiting for data to be
// written to `path`, which in this test never comes. So if this function
// exits, things are working correctly.
Ok(())
}
#[test]
fn test_tail_table_rw_timestamps() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let config = util::Config::default().workers(3);
let server = util::start_server(config)?;
let mut client_interactive = server.connect(postgres::NoTls)?;
let mut client_tail = server.connect(postgres::NoTls)?;
let verify_rw_pair = move |mut rows: &[Row], expected_data: &str| -> bool {
// Clear progress rows that may appear after row 2.
for i in (2..rows.len()).rev() {
if rows[i].get::<_, bool>("mz_progressed") {
rows = &rows[..i];
}
}
for (i, row) in rows.iter().enumerate() {
match row.get::<_, Option<String>>("data") {
// Only verify if all rows have expected data
Some(inner) => {
if &inner != expected_data {
return false;
}
}
// Only verify if row without data is last row
None => {
if i + 1 != rows.len() {
return false;
}
}
}
}
if rows.len() != 2 {
return false;
}
// First row reflects write. Written rows have not progressed, and all
// writes occur at the same timestamp.
assert_eq!(rows[0].get::<_, Option<bool>>("mz_progressed"), Some(false));
// Two writes with the same data have their diffs compacted
assert_eq!(rows[0].get::<_, Option<i64>>("mz_diff"), Some(2));
// Second row reflects closing timestamp, manufactured by the read
assert_eq!(rows[1].get::<_, Option<bool>>("mz_progressed"), Some(true));
assert_eq!(rows[1].get::<_, Option<i64>>("mz_diff"), None);
true
};
client_interactive.batch_execute("CREATE TABLE t1 (data text)")?;
client_tail.batch_execute(
"COMMIT; BEGIN;
DECLARE c1 CURSOR FOR TAIL t1 WITH (PROGRESS);",
)?;
// Keep trying until you either panic or are able to verify the expected behavior.
loop {
client_interactive.execute("BEGIN", &[])?;
client_interactive.execute("INSERT INTO t1 VALUES ($1)", &[&"first".to_owned()])?;
client_interactive.execute("INSERT INTO t1 VALUES ($1)", &[&"first".to_owned()])?;
client_interactive.execute("COMMIT", &[])?;
let _ = client_interactive.query("SELECT * FROM T1", &[])?;
client_interactive.execute("BEGIN", &[])?;
client_interactive.execute("INSERT INTO t1 VALUES ($1)", &[&"second".to_owned()])?;
client_interactive.execute("INSERT INTO t1 VALUES ($1)", &[&"second".to_owned()])?;
client_interactive.execute("COMMIT", &[])?;
let first_rows = client_tail.query("FETCH ALL c1", &[])?;
let first_rows_verified = verify_rw_pair(&first_rows, "first");
let _ = client_interactive.query("SELECT * FROM t1", &[])?;
let second_rows = client_tail.query("FETCH ALL c1", &[])?;
let second_rows_verified = verify_rw_pair(&second_rows, "second");
if first_rows_verified && second_rows_verified {
let first_write_ts = first_rows[0].get::<_, MzTimestamp>("mz_timestamp");
let first_closed_ts = first_rows[1].get::<_, MzTimestamp>("mz_timestamp");
assert!(first_write_ts < first_closed_ts);
let second_write_ts = second_rows[0].get::<_, MzTimestamp>("mz_timestamp");
let second_closed_ts = second_rows[1].get::<_, MzTimestamp>("mz_timestamp");
assert!(first_closed_ts <= second_write_ts);
assert!(second_write_ts < second_closed_ts);
break;
}
}
// Ensure reads don't advance timestamp.
loop {
let first_read =
client_interactive.query("SELECT *, mz_logical_timestamp() FROM t1", &[])?;
let second_read =
client_interactive.query("SELECT *, mz_logical_timestamp() FROM t1", &[])?;
if first_read[0].get::<_, MzTimestamp>("mz_logical_timestamp")
== second_read[0].get::<_, MzTimestamp>("mz_logical_timestamp")
{
break;
}
}
Ok(())
}
// Tests that temporary views created by one connection cannot be viewed
// by another connection.
#[test]
fn test_temporary_views() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let server = util::start_server(util::Config::default())?;
let mut client_a = server.connect(postgres::NoTls)?;
let mut client_b = server.connect(postgres::NoTls)?;
client_a
.batch_execute("CREATE VIEW v AS VALUES (1, 'foo'), (2, 'bar'), (3, 'foo'), (1, 'bar')")?;
client_a.batch_execute("CREATE TEMPORARY VIEW temp_v AS SELECT * FROM v")?;
let query_v = "SELECT count(*) FROM v;";
let query_temp_v = "SELECT count(*) FROM temp_v;";
// Ensure that client_a can query v and temp_v.
    let count: i64 = client_a.query_one(query_v, &[])?.get("count");
assert_eq!(4, count);
let count: i64 = client_a.query_one(query_temp_v, &[])?.get("count");
assert_eq!(4, count);
// Ensure that client_b can query v, but not temp_v.
let count: i64 = client_b.query_one(query_v, &[])?.get("count");
assert_eq!(4, count);
let err = client_b.query_one(query_temp_v, &[]).unwrap_db_error();
    assert_eq!(err.message(), "unknown catalog item 'temp_v'");
Ok(())
}
// Test EXPLAIN TIMESTAMP with tables. Mock time to verify initial table since
// is now(), not 0.
#[test]
fn test_explain_timestamp_table() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let timestamp = Arc::new(Mutex::new(1_000));
let now = {
        let timestamp = Arc::clone(&timestamp);
NowFn::from(move || *timestamp.lock().unwrap())
};
let config = util::Config::default().with_now(now);
let server = util::start_server(config)?;
let mut client = server.connect(postgres::NoTls)?;
let timestamp_re = Regex::new(r"\d{13}").unwrap();
client.batch_execute("CREATE TABLE t1 (i1 INT)")?;
let row = client.query_one("EXPLAIN TIMESTAMP FOR SELECT * FROM t1;", &[])?;
let explain: String = row.get(0);
let explain = timestamp_re.replace_all(&explain, "<TIMESTAMP>");
assert_eq!(
explain,
" timestamp: 1036
since:[ 1036]
upper:[ 0]
has table: true
table read ts: 1036
source materialize.public.t1 (u1, storage):
read frontier:[ 1036]
write frontier:[ 0]\n",
);
Ok(())
}
// Test that when a client is terminated (disconnects from the server) rather
// than cancelled (sends a pgwire cancel request on a new connection), a query
// that caused a compute instance to panic is resolved, allowing the compute
// instance to restart instead of crash-looping forever.
#[test]
fn test_github_12546() -> Result<(), Box<dyn Error>> {
mz_ore::test::init_logging();
let config = util::Config::default();
let server = util::start_server(config)?;
server.runtime.block_on(async {
let (client, conn_task) = server.connect_async(tokio_postgres::NoTls).await?;
client.batch_execute("CREATE TABLE test(a text);").await?;
client
.batch_execute("INSERT INTO test VALUES ('a');")
.await?;
let query = client.query("SELECT mz_internal.mz_panic(a) FROM test", &[]);
let timeout = tokio::time::timeout(Duration::from_secs(2), query);
// We expect the timeout to trigger because the query should be crashing the
// compute instance.
assert_eq!(
timeout.await.unwrap_err().to_string(),
"deadline has elapsed"
);
// Aborting the connection should cause its pending queries to be cancelled,
// allowing the compute instances to stop crashing while trying to execute
// them.
conn_task.abort();
// Need to await `conn_task` to actually deliver the `abort`.
let _ = conn_task.await;
// Make a new connection to verify the compute instance can now start.
let (client, _conn_task) = server.connect_async(tokio_postgres::NoTls).await?;
assert_eq!(
client
.query_one("SELECT count(*) FROM test", &[])
.await?
.get::<_, i64>(0),
1,
);
Ok::<_, Box<dyn Error>>(())
})?;
Ok(())
}
|
{
mz_ore::test::init_logging();
let config = util::Config::default().workers(2);
let server = util::start_server(config)?;
let mut client_writes = server.connect(postgres::NoTls)?;
let mut client_reads = server.connect(postgres::NoTls)?;
client_writes.batch_execute("CREATE TABLE t2 (data text NOT NULL)")?;
client_writes.batch_execute("INSERT INTO t2 VALUES ('data')")?;
client_reads.batch_execute(
"COMMIT; BEGIN;
DECLARE c2 CURSOR FOR TAIL t2 WITH (PROGRESS);",
)?;
#[derive(PartialEq)]
enum State {
WaitingForData,
WaitingForProgress,
Done,
}
let mut state = State::WaitingForData;
// Wait for one progress statement after seeing the data update.
// Alternatively, we could just check any progress statement to make sure
    // that the columns are in fact `Option`s.
while state != State::Done {
let row = client_reads.query_one("FETCH 1 c2", &[])?;
if !row.get::<_, bool>("mz_progressed") {
assert_eq!(row.get::<_, i64>("mz_diff"), 1);
assert_eq!(row.get::<_, String>("data"), "data");
state = State::WaitingForProgress;
} else if state == State::WaitingForProgress {
assert_eq!(row.get::<_, bool>("mz_progressed"), true);
assert_eq!(row.get::<_, Option<i64>>("mz_diff"), None);
assert_eq!(row.get::<_, Option<String>>("data"), None);
state = State::Done;
}
}
Ok(())
}
|
nodedensity_suite_test.go
|
/*
Copyright 2019 The KubeEdge Authors.
|
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodedensity
import (
"strconv"
"strings"
"testing"
"time"
"github.com/kubeedge/kubeedge/tests/e2e/utils"
. "github.com/kubeedge/kubeedge/tests/performance/common"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/api/core/v1"
)
// Context to load config and provide access across the package.
var (
ctx *utils.TestContext
cfg utils.Config
cloudHub string
)
func TestEdgecoreK8sDeployment(t *testing.T) {
var cloudCoreHostIP string
var podlist metav1.PodList
//var toTaint bool
RegisterFailHandler(Fail)
var _ = BeforeSuite(func() {
utils.InfoV6("Kubeedge deployment Load test Begin !!")
cfg = utils.LoadConfig()
ctx = utils.NewTestContext(cfg)
//apply label to all cluster nodes, use the selector to deploy all edgenodes to cluster nodes
err := ApplyLabel(ctx.Cfg.ApiServer2 + NodeHandler)
Expect(err).Should(BeNil())
//Create configMap for CloudCore
CloudConfigMap = "cloudcore-configmap-" + utils.GetRandomString(5)
CloudCoreDeployment = "cloudcore-deployment-" + utils.GetRandomString(5)
		//Deploy cloudcore as a k8s resource to cluster-1
err = HandleCloudDeployment(CloudConfigMap, CloudCoreDeployment, ctx.Cfg.ApiServer,
ctx.Cfg.ApiServer+ConfigmapHandler, ctx.Cfg.ApiServer+DeploymentHandler, ctx.Cfg.CloudImageUrl, ctx.Cfg.NumOfNodes)
Expect(err).Should(BeNil())
time.Sleep(1 * time.Second)
//Get the cloudCore pod Node name and IP
podlist, err = utils.GetPods(ctx.Cfg.ApiServer+AppHandler, "")
Expect(err).To(BeNil())
		for _, pod := range podlist.Items {
			if strings.Contains(pod.Name, "cloudcore-deployment") {
				cloudCoreHostIP = pod.Status.HostIP
				break
			}
		}
utils.CheckPodRunningState(ctx.Cfg.ApiServer+AppHandler, podlist)
time.Sleep(5 * time.Second)
//Create service for cloud
err = utils.ExposeCloudService(CloudCoreDeployment, ctx.Cfg.ApiServer+ServiceHandler)
Expect(err).Should(BeNil())
//Create a nodePort Service to access the cloud Service from the cluster nodes
nodePort := utils.GetServicePort(CloudCoreDeployment, ctx.Cfg.ApiServer+ServiceHandler)
		port := strconv.FormatInt(int64(nodePort), 10)
		cloudHub = "wss://" + cloudCoreHostIP + ":" + port
		//Deploy edgecore as a k8s resource to cluster-2
})
AfterSuite(func() {
By("Kubeedge deployment Load test End !!....!")
DeleteCloudDeployment(ctx.Cfg.ApiServer)
utils.CheckPodDeleteState(ctx.Cfg.ApiServer+AppHandler, podlist)
})
RunSpecs(t, "kubeedge Performace Load test Suite")
}
|
Licensed under the Apache License, Version 2.0 (the "License");
|
dbsubnetgroup.go
|
/*
Copyright 2019 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dbsubnetgroup
import (
"context"
"errors"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/rds"
rdstypes "github.com/aws/aws-sdk-go-v2/service/rds/types"
"github.com/crossplane/provider-aws/apis/database/v1beta1"
awsclients "github.com/crossplane/provider-aws/pkg/clients"
)
// Client is the external client used for DBSubnetGroup Custom Resource
type Client interface {
CreateDBSubnetGroup(context.Context, *rds.CreateDBSubnetGroupInput, ...func(*rds.Options)) (*rds.CreateDBSubnetGroupOutput, error)
DeleteDBSubnetGroup(context.Context, *rds.DeleteDBSubnetGroupInput, ...func(*rds.Options)) (*rds.DeleteDBSubnetGroupOutput, error)
DescribeDBSubnetGroups(context.Context, *rds.DescribeDBSubnetGroupsInput, ...func(*rds.Options)) (*rds.DescribeDBSubnetGroupsOutput, error)
ModifyDBSubnetGroup(context.Context, *rds.ModifyDBSubnetGroupInput, ...func(*rds.Options)) (*rds.ModifyDBSubnetGroupOutput, error)
AddTagsToResource(context.Context, *rds.AddTagsToResourceInput, ...func(*rds.Options)) (*rds.AddTagsToResourceOutput, error)
ListTagsForResource(context.Context, *rds.ListTagsForResourceInput, ...func(*rds.Options)) (*rds.ListTagsForResourceOutput, error)
}
// NewClient returns a new RDS client built from the supplied AWS config.
func NewClient(cfg aws.Config) Client {
return rds.NewFromConfig(cfg)
}
// IsDBSubnetGroupNotFoundErr returns true if the error is because the item doesn't exist
func IsDBSubnetGroupNotFoundErr(err error) bool {
var nff *rdstypes.DBSubnetGroupNotFoundFault
return errors.As(err, &nff)
}
// IsDBSubnetGroupUpToDate checks whether there is a change in any of the modifiable fields.
func IsDBSubnetGroupUpToDate(p v1beta1.DBSubnetGroupParameters, sg rdstypes.DBSubnetGroup, tags []rdstypes.Tag) bool { // nolint:gocyclo
if p.Description != awsclients.StringValue(sg.DBSubnetGroupDescription) {
return false
}
if len(p.SubnetIDs) != len(sg.Subnets) {
return false
}
pSubnets := make(map[string]struct{}, len(p.SubnetIDs))
for _, id := range p.SubnetIDs {
pSubnets[id] = struct{}{}
}
for _, id := range sg.Subnets {
if _, ok := pSubnets[aws.ToString(id.SubnetIdentifier)]; !ok {
return false
}
}
if len(p.Tags) != len(tags) {
return false
}
pTags := make(map[string]string, len(p.Tags))
for _, tag := range p.Tags {
pTags[tag.Key] = tag.Value
}
for _, tag := range tags {
val, ok := pTags[aws.ToString(tag.Key)]
if !ok || !strings.EqualFold(val, aws.ToString(tag.Value))
|
}
return true
}
// GenerateObservation produces a v1beta1.DBSubnetGroupObservation from an
// rdstypes.DBSubnetGroup.
func GenerateObservation(sg rdstypes.DBSubnetGroup) v1beta1.DBSubnetGroupObservation {
o := v1beta1.DBSubnetGroupObservation{
State: aws.ToString(sg.SubnetGroupStatus),
ARN: aws.ToString(sg.DBSubnetGroupArn),
VPCID: aws.ToString(sg.VpcId),
}
if len(sg.Subnets) != 0 {
o.Subnets = make([]v1beta1.Subnet, len(sg.Subnets))
for i, val := range sg.Subnets {
o.Subnets[i] = v1beta1.Subnet{
SubnetID: aws.ToString(val.SubnetIdentifier),
SubnetStatus: aws.ToString(val.SubnetStatus),
}
}
}
return o
}
// LateInitialize fills the empty fields in *v1beta1.DBSubnetGroupParameters with
// the values seen in rdstypes.DBSubnetGroup.
func LateInitialize(in *v1beta1.DBSubnetGroupParameters, sg *rdstypes.DBSubnetGroup) {
if sg == nil {
return
}
in.Description = awsclients.LateInitializeString(in.Description, sg.DBSubnetGroupDescription)
if len(in.SubnetIDs) == 0 && len(sg.Subnets) != 0 {
in.SubnetIDs = make([]string, len(sg.Subnets))
for i, val := range sg.Subnets {
in.SubnetIDs[i] = aws.ToString(val.SubnetIdentifier)
}
}
}
|
{
return false
}
|
project.go
|
package api
import (
"github.com/andygrunwald/go-jira"
)
type ListProjectOptions struct {
Category []string
}
type ProjectListEntry struct {
Expand string `json:"expand" structs:"expand"`
Self string `json:"self" structs:"self"`
ID string `json:"id" structs:"id"`
Key string `json:"key" structs:"key"`
Name string `json:"name" structs:"name"`
AvatarUrls jira.AvatarUrls `json:"avatarUrls" structs:"avatarUrls"`
ProjectTypeKey string `json:"projectTypeKey" structs:"projectTypeKey"`
ProjectCategory jira.ProjectCategory `json:"projectCategory,omitempty" structs:"projectsCategory,omitempty"`
IssueTypes []jira.IssueType `json:"issueTypes,omitempty" structs:"issueTypes,omitempty"`
}
func ListProjects(host string, category []string) ([]ProjectListEntry, error) {
client, err := NewClient(host)
if err != nil {
return nil, ApiError(err)
}
projects, _, err := client.Project.GetList()
if err != nil {
return nil, ApiError(err)
}
result := make([]ProjectListEntry, 0)
for _, project := range *projects {
		if len(category) == 0 {
result = append(result, ProjectListEntry(project))
continue
}
for _, c := range category {
if c == "" || c == project.ProjectCategory.Name {
result = append(result, ProjectListEntry(project))
}
}
}
return result, ApiError(err)
}
func GetProject(host string, project string) (*jira.Project, error) {
client, err := NewClient(host)
if err != nil {
return nil, ApiError(err)
}
jiraProject, _, err := client.Project.Get(project)
return jiraProject, ApiError(err)
}
func ListProjectCategories(host string) ([]jira.ProjectCategory, error) {
client, err := NewClient(host)
if err != nil {
return nil, ApiError(err)
}
req, _ := client.NewRequest("GET", "rest/api/2/projectCategory", nil)
projectCategories := new([]jira.ProjectCategory)
_, err = client.Do(req, projectCategories)
if err != nil {
return nil, err
|
}
return *projectCategories, nil
}
| |
receiving.rs
|
//! Tests auto-converted from "sass-spec/spec/non_conformant/mixin/content/arguments/receiving.hrx"
#[test]
#[ignore] // unexpected error
fn
|
() {
assert_eq!(
crate::rsass(
"// Content blocks\' argument lists can define arguments in in all the same ways\
\n// as they\'re defined in any other argument list.\
\n\
\nwith-defaults {\
\n nothing-passed {\
\n @mixin mixin {\
\n @content;\
\n }\
\n\
\n @include mixin using ($arg1: value1, $arg2: value2) {\
\n arg1: $arg1;\
\n arg2: $arg2;\
\n }\
\n }\
\n\
\n partial-override {\
\n @mixin mixin {\
\n @content($arg2: other2);\
\n }\
\n\
\n @include mixin using ($arg1: value1, $arg2: value2) {\
\n arg1: $arg1;\
\n arg2: $arg2;\
\n }\
\n }\
\n\
\n total-override {\
\n @mixin mixin {\
\n @content(other1, other2);\
\n }\
\n\
\n @include mixin using ($arg1: value1, $arg2: value2) {\
\n arg1: $arg1;\
\n arg2: $arg2;\
\n }\
\n }\
\n}\
\n\
\nwith-splat {\
\n nothing-passed {\
\n @mixin mixin {\
\n @content;\
\n }\
\n\
\n @include mixin using ($args...) {\
\n positional: inspect($args);\
\n keywords: inspect(keywords($args));\
\n }\
\n }\
\n\
\n positional-passed {\
\n @mixin mixin {\
\n @content(value1, value2, value3);\
\n }\
\n\
\n @include mixin using ($args...) {\
\n positional: inspect($args);\
\n keywords: inspect(keywords($args));\
\n }\
\n }\
\n\
\n named-passed {\
\n @mixin mixin {\
\n @content($arg1: value1, $arg2: value2);\
\n }\
\n\
\n @include mixin using ($args...) {\
\n positional: inspect($args);\
\n keywords: inspect(keywords($args));\
\n }\
\n }\
\n\
\n both-passed {\
\n @mixin mixin {\
\n @content(value1, $arg2: value2);\
\n }\
\n\
\n @include mixin using ($args...) {\
\n positional: inspect($args);\
\n keywords: inspect(keywords($args));\
\n }\
\n }\
\n}\
\n"
)
.unwrap(),
"with-defaults nothing-passed {\
\n arg1: value1;\
\n arg2: value2;\
\n}\
\nwith-defaults partial-override {\
\n arg1: value1;\
\n arg2: other2;\
\n}\
\nwith-defaults total-override {\
\n arg1: other1;\
\n arg2: other2;\
\n}\
\nwith-splat nothing-passed {\
\n positional: ();\
\n keywords: ();\
\n}\
\nwith-splat positional-passed {\
\n positional: value1, value2, value3;\
\n keywords: ();\
\n}\
\nwith-splat named-passed {\
\n positional: ();\
\n keywords: (arg1: value1, arg2: value2);\
\n}\
\nwith-splat both-passed {\
\n positional: (value1,);\
\n keywords: (arg2: value2);\
\n}\
\n"
);
}
|
test
|
config.go
|
package aws
import (
"net/http"
"time"
"bosun.org/_third_party/github.com/aws/aws-sdk-go/aws/credentials"
)
// The default number of retries for a service. The value of -1 indicates that
// the service specific retry default will be used.
const DefaultRetries = -1
// A Config provides service configuration for service clients. By default,
// all clients will use the {defaults.DefaultConfig} structure.
type Config struct {
// The credentials object to use when signing requests. Defaults to
// {defaults.DefaultChainCredentials}.
Credentials *credentials.Credentials
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
// to `""` to use the default generated endpoint.
//
// @note You must still provide a `Region` value when specifying an
// endpoint for a client.
Endpoint *string
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
// AWS Regions and Endpoints
Region *string
// Set this to `true` to disable SSL when sending requests. Defaults
// to `false`.
DisableSSL *bool
// The HTTP client to use when sending requests. Defaults to
// `http.DefaultClient`.
HTTPClient *http.Client
// An integer value representing the logging level. The default log level
// is zero (LogOff), which represents no logging. To enable logging set
// to a LogLevel Value.
LogLevel *LogLevelType
// The logger writer interface to write logging messages to. Defaults to
// standard out.
Logger Logger
// The maximum number of times that a request will be retried for failures.
// Defaults to -1, which defers the max retry setting to the service specific
// configuration.
MaxRetries *int
// Disables semantic parameter validation, which validates input for missing
// required fields and/or other semantic request input errors.
DisableParamValidation *bool
// Disables the computation of request and response checksums, e.g.,
// CRC32 checksums in Amazon DynamoDB.
DisableComputeChecksums *bool
// Set this to `true` to force the request to use path-style addressing,
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
// use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// @note This configuration option is specific to the Amazon S3 service.
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
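	// SleepDelay is the function used to pause between request retries.
	// Defaults to time.Sleep (see WithSleepDelay).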
SleepDelay func(time.Duration)
}
// NewConfig returns a new Config pointer that can be chained with builder methods to
// set multiple configuration values inline without using pointers.
//
// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
//
func NewConfig() *Config {
return &Config{}
}
// WithCredentials sets a config Credentials value returning a Config pointer
// for chaining.
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
c.Credentials = creds
return c
}
// WithEndpoint sets a config Endpoint value returning a Config pointer for
// chaining.
func (c *Config) WithEndpoint(endpoint string) *Config {
c.Endpoint = &endpoint
return c
}
// WithRegion sets a config Region value returning a Config pointer for
// chaining.
func (c *Config) WithRegion(region string) *Config {
	c.Region = &region
return c
}
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
// for chaining.
func (c *Config) WithDisableSSL(disable bool) *Config {
c.DisableSSL = &disable
return c
}
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
// for chaining.
func (c *Config) WithHTTPClient(client *http.Client) *Config {
c.HTTPClient = client
return c
}
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
// for chaining.
func (c *Config) WithMaxRetries(max int) *Config {
c.MaxRetries = &max
return c
}
// WithDisableParamValidation sets a config DisableParamValidation value
// returning a Config pointer for chaining.
func (c *Config) WithDisableParamValidation(disable bool) *Config {
c.DisableParamValidation = &disable
return c
}
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
// returning a Config pointer for chaining.
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
c.DisableComputeChecksums = &disable
return c
}
// WithLogLevel sets a config LogLevel value returning a Config pointer for
// chaining.
func (c *Config) WithLogLevel(level LogLevelType) *Config {
c.LogLevel = &level
return c
}
// WithLogger sets a config Logger value returning a Config pointer for
// chaining.
func (c *Config) WithLogger(logger Logger) *Config {
c.Logger = logger
return c
}
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
// pointer for chaining.
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
c.S3ForcePathStyle = &force
return c
}
// WithSleepDelay overrides the function used to sleep while waiting for the
// next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
c.SleepDelay = fn
return c
}
// Merge returns a new Config with the other Config's attribute values merged into
// this Config. If the other Config's attribute is nil it will not be merged into
// the new Config to be returned.
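//
// An illustrative merge (mirroring the NewConfig example above):
//
//	base := aws.NewConfig().WithRegion("us-west-2")
//	merged := base.Merge(aws.NewConfig().WithMaxRetries(10))
//	// merged keeps Region from base and takes MaxRetries from the other Config.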
func (c Config) Merge(other *Config) *Config {
if other == nil {
return &c
|
if other.Credentials != nil {
dst.Credentials = other.Credentials
}
if other.Endpoint != nil {
dst.Endpoint = other.Endpoint
}
if other.Region != nil {
dst.Region = other.Region
}
if other.DisableSSL != nil {
dst.DisableSSL = other.DisableSSL
}
if other.HTTPClient != nil {
dst.HTTPClient = other.HTTPClient
}
if other.LogLevel != nil {
dst.LogLevel = other.LogLevel
}
if other.Logger != nil {
dst.Logger = other.Logger
}
if other.MaxRetries != nil {
dst.MaxRetries = other.MaxRetries
}
if other.DisableParamValidation != nil {
dst.DisableParamValidation = other.DisableParamValidation
}
if other.DisableComputeChecksums != nil {
dst.DisableComputeChecksums = other.DisableComputeChecksums
}
if other.S3ForcePathStyle != nil {
dst.S3ForcePathStyle = other.S3ForcePathStyle
}
if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay
}
return &dst
}
// Copy will return a shallow copy of the Config object.
func (c Config) Copy() *Config {
dst := c
return &dst
}
|
}
dst := c
|
mpijob.py
|
from copy import deepcopy
from os import environ
from pprint import pprint
import yaml
from kubernetes import client, config
from kubernetes.client.rest import ApiException
_mpijob_template = {
'apiVersion': 'kubeflow.org/v1alpha1',
'kind': 'MPIJob',
'metadata': {
'name': '',
'namespace': 'default-tenant'
},
'spec': {
'replicas': 1,
'template': {
'spec': {
'containers': [{
'image': 'iguaziodocker/horovod:0.1.1',
'name': '',
'command': [],
'volumeMounts': [{'name': 'v3io', 'mountPath': '/User'}],
'workingDir': '/User',
'securityContext': {
'capabilities': {'add': ['IPC_LOCK']}},
'resources': {
'limits': {'nvidia.com/gpu': 1}}}],
'volumes': [{
'name': 'v3io',
'flexVolume': {
'driver': 'v3io/fuse',
'options': {
'container': 'users',
'subPath': '',
'accessKey': '',
}
}}]
}}}}
class MpiJob:
"""
A wrapper over Kubernetes MPIJob (Horovod).
Example:
from mpijob import MpiJob
job = MpiJob('myname', 'img', ['a','b'])
print(job.to_yaml())
job.submit()
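    Builder methods return self, so calls can be chained (illustrative):
    job.replicas(4).gpus(1).working_dir('/User')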
"""
group = 'kubeflow.org'
version = 'v1alpha1'
plural = 'mpijobs'
def
|
(self, name, image=None, command=None,
replicas=1, namespace='default-tenant'):
self.api_instance = None
self.name = name
self.namespace = namespace
self._struct = deepcopy(_mpijob_template)
self._struct['metadata'] = {'name': name, 'namespace': namespace}
self._update_container('name', name)
if image:
self._update_container('image', image)
if command:
self._update_container('command', ['mpirun','python'] + command)
if replicas:
self._struct['spec']['replicas'] = replicas
self._update_access_token(environ.get('V3IO_ACCESS_KEY',''))
self._update_running_user(environ.get('V3IO_USERNAME',''))
def _update_container(self, key, value):
self._struct['spec']['template']['spec']['containers'][0][key] = value
def _update_access_token(self, token):
self._struct['spec']['template']['spec']['volumes'][0]['flexVolume']['options']['accessKey'] = token
def _update_running_user(self, username):
self._struct['spec']['template']['spec']['volumes'][0]['flexVolume']['options']['subPath'] = '/' + username
def volume(self, mount='/User', volpath='~/', access_key=''):
self._update_container('volumeMounts', [{'name': 'v3io', 'mountPath': mount}])
if volpath.startswith('~/'):
v3io_home = environ.get('V3IO_HOME', '')
volpath = v3io_home + volpath[1:]
container, subpath = split_path(volpath)
access_key = access_key or environ.get('V3IO_ACCESS_KEY','')
vol = {'name': 'v3io', 'flexVolume': {
'driver': 'v3io/fuse',
'options': {
'container': container,
'subPath': subpath,
'accessKey': access_key,
}
}}
self._struct['spec']['template']['spec']['volumes'] = [vol]
return self
def gpus(self, num, gpu_type='nvidia.com/gpu'):
self._update_container('resources', {'limits' : {gpu_type: num}})
return self
def replicas(self, replicas_num):
self._struct['spec']['replicas'] = replicas_num
return self
def working_dir(self, working_dir):
self._update_container('workingDir', working_dir)
return self
def to_dict(self):
return self._struct
def to_yaml(self):
return yaml.dump(self.to_dict(), default_flow_style=False, sort_keys=False)
def submit(self):
config.load_incluster_config()
self.api_instance = client.CustomObjectsApi()
try:
            api_response = self.api_instance.create_namespaced_custom_object(
                MpiJob.group, MpiJob.version, self.namespace, MpiJob.plural, self.to_dict())
pprint(api_response)
except ApiException as e:
print("Exception when creating MPIJob: %s" % e)
def delete(self):
try:
# delete the mpi job
body = client.V1DeleteOptions()
api_response = self.api_instance.delete_namespaced_custom_object(
MpiJob.group, MpiJob.version, self.namespace, MpiJob.plural, self.name, body)
pprint(api_response)
except ApiException as e:
print("Exception when calling CustomObjectsApi->delete_namespaced_custom_object: %s\\n" % e)
def split_path(mntpath=''):
    if mntpath.startswith('/'):
mntpath = mntpath[1:]
paths = mntpath.split('/')
container = paths[0]
subpath = ''
if len(paths) > 1:
subpath = mntpath[len(container):]
return container, subpath
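# Illustrative behavior: split_path('users/jdoe/data') returns ('users', '/jdoe/data').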
|
__init__
|
RunEnvironment.ts
|
/**
 * Enumeration of runtime environments
*/
export enum RunEnvironment
{
/**
     * Run in feng3d mode
*/
feng3d = 1 << 0,
/**
* 运行在编辑器中
*/
editor = 1 << 1,
/**
* 在所有环境中运行
|
}
|
*/
all = (1 << 8) - 1,
|
main.ts
|
/// <reference path="../typings/index.d.ts" />
//---------------------------------------------------------------------
// <copyright file="main.ts">
// This code is licensed under the MIT License.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT.
// </copyright>
// <summary>
// TypeScript class that adds the menu action and shows the dialog.
// </summary>
//---------------------------------------------------------------------
import GitFolderManager = require("scripts/GitFolderManager");
import TFVCFolderManager = require("scripts/TFVCFolderManager");
import Dialog = require("scripts/dialog");
import TelemetryClient = require("scripts/TelemetryClient");
export enum SourceControl { Git, TFVC };
export class AddFolderMenu {
private actionContext;
public TelemetryClient = TelemetryClient.TelemetryClient.getClient();
public execute(actionContext) {
actionContext.getSourceItemContext().then((sourceContext) => {
this.actionContext = sourceContext;
this.showDialog();
});
}
private getSourceControlType() {
if (this.actionContext.gitRepository) {
return SourceControl.Git;
}
        return SourceControl.TFVC;
}
private showDialog() {
VSS.getService("ms.vss-web.dialog-service").then((dialogSvc: IHostDialogService) => {
var createNewFolderDialog: Dialog.AddFolderDialog;
var sourceControlType = this.getSourceControlType();
// contribution info
var extInfo = VSS.getExtensionContext();
var dialogContributionId = extInfo.publisherId + "." + extInfo.extensionId + "." + "createNewFolderDialog";
var callBack;
var folderManager = null;
if (sourceControlType == SourceControl.Git) {
folderManager = new GitFolderManager.GitFolderManager(this.actionContext);
callBack = folderManager.dialogCallback;
this.TelemetryClient.trackEvent("Git_Dialog_Opened");
}
|
}
var dialogOptions = {
title: "Create new folder",
draggable: true,
modal: true,
okText: "Create",
cancelText: "Cancel",
okCallback: callBack,
defaultButton: "ok",
getDialogResult: function () {
return createNewFolderDialog ? createNewFolderDialog.getFormInputs() : null;
},
};
dialogSvc.openDialog(dialogContributionId, dialogOptions).then((dialog) => {
dialog.getContributionInstance("createNewFolderDialog").then((createNewFolderDialogInstance: Dialog.AddFolderDialog) => {
createNewFolderDialog = createNewFolderDialogInstance;
createNewFolderDialog.setVersionControl(sourceControlType);
createNewFolderDialog.setFolderManager(folderManager);
var path = "";
if (sourceControlType == SourceControl.Git) {
path = this.actionContext.gitRepository.name + this.actionContext.item.path;
}
else {
path = this.actionContext.item.path;
}
createNewFolderDialog.setCurrentPath(path);
createNewFolderDialog.onStateChanged(function (isValid) {
dialog.updateOkButton(isValid);
});
createNewFolderDialog.initialValidate();
});
})
})
}
}
VSS.register("addFolder", function (context) {
return new AddFolderMenu();
});
VSS.notifyLoadSucceeded();
|
else {
folderManager = new TFVCFolderManager.TFVCFolderManager(this.actionContext);
callBack = folderManager.dialogCallback;
this.TelemetryClient.trackEvent("TFVC_Dialog_Opened");
|
server.js
|
'use strict';
const express = require('express');
const app = express();
const cors = require('cors');
|
const mongoose = require('mongoose');
require('dotenv').config();
app.use(cors());
const PORT = process.env.PORT;
const MONGO_URL = process.env.MONGO_URL;
const DB_NAME = process.env.DB_NAME;
/**
 * We use the mongoose library to connect to MongoDB.
*/
mongoose.connect(`${MONGO_URL}/${DB_NAME}`);
// destructuring assignment
const { getCats } = require('./controllers/cat.controllers');
const getIndex = require('./controllers/index.controller');
const seedCat = require('./helper/catSeed.seed');
// Call this function only once to avoid seeding duplicate data.
// seedCat();
// Proof of life Endpoint
app.get('/', getIndex);
/**
* Cat CRUD API Endpoint
*/
app.get('/cat', getCats)
// Start the Express server.
app.listen(PORT, () => {
console.log(`Server started on port ${PORT}`);
});
| |
main.py
|
import os
import logging
import sentry_sdk
from aiogram import Bot, Dispatcher, executor, types
from datetime import datetime, timedelta
from pypi_tools.logic import remove_track_for_package
import pypi_tools.data as d
from pypi_tools.helpers import validate_input
import pypi_tools.vizualizer as v
import pypi_tools.readme as r
import asyncio
import aioredis
logging.basicConfig(level=logging.INFO)
redis_host = f"redis://{os.environ.get('REDIS_HOST')}"
#sentry_sdk.init(os.environ["SENTRY_PATH"])
bot = Bot(token=os.environ["BOT_API_KEY"], parse_mode="html")
dp = Dispatcher(bot)
@dp.message_handler(commands=['start'])
async def send_welcome(message):
text = f"Hello, {message.chat.first_name} {message.chat.last_name}! \n" \
f"Welcome to <b>PyPi Tools Bot.</b>\n\n" \
"This Bot created special to obtain information from Official Python PyPi Server\n" \
+ r.help_text + r.current_version
await message.answer(text)
@dp.message_handler(commands=['help'])
async def send_help(message):
await message.answer(r.help_text)
@dp.message_handler(lambda message: message.text and (
'/stats' in message.text.lower() or 'stats:' in message.text.lower()))
@validate_input(command='stats',
known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats(message):
output = message.output
sub_command = message.sub_command or 5
if len(output.split()) == 1:
days = sub_command
package_name = output
current_date = datetime.now().date()
data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
output = d.stats_text(data_, package_name, days)
await message.answer(output)
@dp.message_handler(lambda message: message.text and (
'/plot' in message.text.lower() or 'plot:' in message.text.lower()))
@validate_input(command='plot',
known_sub_commands={'@any_number': lambda num: num})
async def send_package_stats_with_graph(message):
output = message.output
sub_command = message.sub_command or 5
if len(output.split()) == 1:
days = sub_command
package_name = output
current_date = datetime.now().date()
data_ = await d.cached_package_downloads_stats(package_name, days, current_date)
output = d.stats_text(data_, package_name, days)
temp = 'temp/'
os.makedirs(temp, exist_ok=True)
# for pandas range
start_date = current_date - timedelta(days=2)
file_name = f'{temp}/{package_name}:{current_date - timedelta(days=1)}:{days}.png'
if not os.path.isfile(file_name):
file_name = v.generate_graph(start_date, [item for _, item in data_.items()][::-1], file_name)
file_ = types.InputFile(file_name)
await message.answer(output)
await message.answer_photo(file_)
@dp.message_handler(commands=['random'])
async def command(message):
output = await d.get_random_package()
await message.answer(output)
@dp.message_handler(commands=['search', 'search:detailed'])
@validate_input(command='search',
known_sub_commands={'detailed': lambda _package_name: d.request_package_info_from_pypi(
_package_name, detailed=True)},
additional_error="Or use with sub-command to get detailed information:"
"/search:detailed aiohttp")
async def search_command(message):
|
@dp.message_handler(commands=['releases', 'releases:full'])
@validate_input(command='releases',
known_sub_commands={'full': 'full'},
additional_error="Or use with sub-command to get full list of releases:"
"/releases:full aiohttp")
async def releases_command(message):
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
releases = await d.get_release_list(package_name=package_name)
if sub_command and sub_command == 'full':
output = f"Full Releases list for Package {package_name}\n\n"
for version, v_date in releases.items():
output += f"<b>{version}</b>: {v_date}\n"
else:
output = f"Last 7 Releases for Package {package_name}\n\n"
for num, items in enumerate(list(releases.items())):
if num > 7:
break
version, v_date = items
output += f"<b>{version}</b>: {v_date}\n"
await message.answer(output)
track_sub_commands = {'stop': lambda key: remove_track_for_package(key),
'nodev': 'nodev'}
@dp.message_handler(commands=['track', 'track:stop', 'track:nodev'])
@validate_input(command='track',
known_sub_commands=track_sub_commands,
additional_error="Or use with sub-command to stop track a package releases"
"/track:stop aiohttp")
async def track_command(message):
""" handler to react on /track command and it sub-commands"""
pool = await aioredis.create_redis_pool(redis_host)
with await pool as redis:
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
chat_id = str(message.chat.id)
key = chat_id + ":" + package_name
if sub_command and sub_command != 'nodev':
output = await sub_command(key)
else:
nodev = False
if sub_command:
nodev = True
versions = await d.get_release_list(package_name, nodev)
if versions is None:
output = f'Package {package_name} does not exists'
else:
current_version = d.get_last_release_version(versions)
output = f"Current {package_name} version is {current_version} \n" \
"You will be announced with new version release"
version = current_version[0]
if nodev:
version = version + ':nodev'
await redis.set(key, version)
await message.answer(output)
@dp.message_handler()
async def echo_all(message: types.Message):
await message.answer(message.text)
if __name__ == '__main__':
try:
executor.start_polling(dp, skip_updates=True)
except Exception as e:
sentry_sdk.capture_exception(e)
|
output = message.output
sub_command = message.sub_command
if len(output.split()) == 1:
package_name = output
if sub_command:
output = await sub_command(package_name)
else:
output = await d.request_package_info_from_pypi(package_name)
await message.answer(output)
|
ModifyDeviceCaptureRequest.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvs.endpoint import endpoint_data
class ModifyDeviceCaptureRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'vs', '2018-12-12', 'ModifyDeviceCapture','vs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Video(self):
return self.get_query_params().get('Video')
def set_Video(self,Video):
self.add_query_param('Video',Video)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id)
def get_Image(self):
return self.get_query_params().get('Image')
def set_Image(self,Image):
|
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
|
self.add_query_param('Image',Image)
|
Ui_DDSMonitor.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Z:\Users\Yintai Zhang\Research\ExperimentManger_Test_2\DDSMonitor.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DDSMonitor(object):
def
|
(self, DDSMonitor):
DDSMonitor.setObjectName("DDSMonitor")
DDSMonitor.resize(1179, 846)
self.Monitor_TextBrowser = QtWidgets.QTextBrowser(DDSMonitor)
self.Monitor_TextBrowser.setGeometry(QtCore.QRect(20, 90, 1141, 731))
self.Monitor_TextBrowser.setObjectName("Monitor_TextBrowser")
self.ChooseDDS_ComboBox = QtWidgets.QComboBox(DDSMonitor)
self.ChooseDDS_ComboBox.setGeometry(QtCore.QRect(100, 30, 181, 22))
self.ChooseDDS_ComboBox.setObjectName("ChooseDDS_ComboBox")
self.ChooseDDS_label = QtWidgets.QLabel(DDSMonitor)
self.ChooseDDS_label.setGeometry(QtCore.QRect(30, 30, 61, 16))
self.ChooseDDS_label.setObjectName("ChooseDDS_label")
self.RefreshTime_label = QtWidgets.QLabel(DDSMonitor)
self.RefreshTime_label.setGeometry(QtCore.QRect(310, 30, 71, 16))
self.RefreshTime_label.setObjectName("RefreshTime_label")
self.RefreshTime_SpinBox = QtWidgets.QDoubleSpinBox(DDSMonitor)
self.RefreshTime_SpinBox.setGeometry(QtCore.QRect(390, 30, 101, 22))
self.RefreshTime_SpinBox.setObjectName("RefreshTime_SpinBox")
self.Start_Button = QtWidgets.QPushButton(DDSMonitor)
self.Start_Button.setGeometry(QtCore.QRect(530, 30, 75, 23))
self.Start_Button.setObjectName("Start_Button")
self.Stop_Button = QtWidgets.QPushButton(DDSMonitor)
self.Stop_Button.setGeometry(QtCore.QRect(620, 30, 75, 23))
self.Stop_Button.setObjectName("Stop_Button")
self.Current_Button = QtWidgets.QPushButton(DDSMonitor)
self.Current_Button.setGeometry(QtCore.QRect(710, 30, 75, 23))
self.Current_Button.setObjectName("Current_Button")
self.retranslateUi(DDSMonitor)
QtCore.QMetaObject.connectSlotsByName(DDSMonitor)
def retranslateUi(self, DDSMonitor):
_translate = QtCore.QCoreApplication.translate
DDSMonitor.setWindowTitle(_translate("DDSMonitor", "DDS Monitor"))
self.Monitor_TextBrowser.setHtml(_translate("DDSMonitor", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:16pt;\">---DDS Monitor---</span></p></body></html>"))
self.ChooseDDS_label.setText(_translate("DDSMonitor", "Choose DDS"))
self.RefreshTime_label.setText(_translate("DDSMonitor", "Refresh Time"))
self.Start_Button.setText(_translate("DDSMonitor", "Start"))
self.Stop_Button.setText(_translate("DDSMonitor", "Stop"))
self.Current_Button.setText(_translate("DDSMonitor", "Current"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
DDSMonitor = QtWidgets.QDialog()
ui = Ui_DDSMonitor()
ui.setupUi(DDSMonitor)
DDSMonitor.show()
sys.exit(app.exec_())
|
setupUi
|
http_request_event_test.go
|
package logger
import (
"bytes"
"net/http"
"net/url"
"sync"
"testing"
"time"
"github.com/blend/go-sdk/assert"
)
func TestWebRequestEventListener(t *testing.T) {
assert := assert.New(t)
wg := sync.WaitGroup{}
wg.Add(4)
textBuffer := bytes.NewBuffer(nil)
jsonBuffer := bytes.NewBuffer(nil)
all := New().WithFlags(AllFlags()).WithRecoverPanics(false).
WithWriter(NewTextWriter(textBuffer)).
WithWriter(NewJSONWriter(jsonBuffer))
defer all.Close()
all.Listen(HTTPRequest, "default", NewHTTPRequestEventListener(func(hre *HTTPRequestEvent) {
defer wg.Done()
assert.Equal(HTTPRequest, hre.Flag())
assert.NotNil(hre.Request())
assert.Equal("test.com", hre.Request().Host)
}))
go func() {
defer wg.Done()
all.Trigger(NewHTTPRequestEvent(&http.Request{Host: "test.com", URL: &url.URL{}}))
}()
go func() {
defer wg.Done()
all.Trigger(NewHTTPRequestEvent(&http.Request{Host: "test.com", URL: &url.URL{}}))
}()
wg.Wait()
all.Drain()
assert.NotEmpty(textBuffer.String())
assert.NotEmpty(jsonBuffer.String())
}
func TestWebRequestEventInterfaces(t *testing.T)
|
func TestWebRequestEventProperties(t *testing.T) {
assert := assert.New(t)
e := NewHTTPRequestEvent(nil)
assert.False(e.Timestamp().IsZero())
assert.True(e.WithTimestamp(time.Time{}).Timestamp().IsZero())
assert.Empty(e.Labels())
assert.Equal("bar", e.WithLabel("foo", "bar").Labels()["foo"])
assert.Empty(e.Annotations())
assert.Equal("zar", e.WithAnnotation("moo", "zar").Annotations()["moo"])
assert.Equal(HTTPRequest, e.Flag())
assert.Equal(Error, e.WithFlag(Error).Flag())
assert.Empty(e.Headings())
assert.Equal([]string{"Heading"}, e.WithHeadings("Heading").Headings())
assert.Nil(e.Request())
assert.NotNil(e.WithRequest(&http.Request{}).Request())
assert.Nil(e.State())
assert.Equal("foo", e.WithState(map[interface{}]interface{}{"bar": "foo"}).State()["bar"])
assert.Empty(e.Route())
assert.Equal("Route", e.WithRoute("Route").Route())
}
|
{
assert := assert.New(t)
ee := NewHTTPRequestEvent(&http.Request{Host: "test.com", URL: &url.URL{}}).WithHeadings("heading").WithLabel("foo", "bar")
eventProvider, isEvent := MarshalEvent(ee)
assert.True(isEvent)
assert.Equal(HTTPRequest, eventProvider.Flag())
assert.False(eventProvider.Timestamp().IsZero())
headingProvider, isHeadingProvider := MarshalEventHeadings(ee)
assert.True(isHeadingProvider)
assert.Equal([]string{"heading"}, headingProvider.Headings())
metaProvider, isMetaProvider := MarshalEventMetaProvider(ee)
assert.True(isMetaProvider)
assert.Equal("bar", metaProvider.Labels()["foo"])
}
|
reconciler_test.go
|
package reconciler
import (
"testing"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
)
func
|
(t *testing.T) {
catsrc := &v1alpha1.CatalogSource{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "testns",
},
}
key := "kubernetes.io/os"
value := "linux"
gotCatSrcPod := Pod(catsrc, "hello", "busybox", "", map[string]string{}, map[string]string{}, int32(0), int32(0))
gotCatSrcPodSelector := gotCatSrcPod.Spec.NodeSelector
if gotCatSrcPodSelector[key] != value {
t.Errorf("expected %s value for node selector key %s, received %s value instead", value, key,
gotCatSrcPodSelector[key])
}
}
func TestPullPolicy(t *testing.T) {
var table = []struct {
image string
policy corev1.PullPolicy
}{
{
image: "quay.io/operator-framework/olm@sha256:b9d011c0fbfb65b387904f8fafc47ee1a9479d28d395473341288ee126ed993b",
policy: corev1.PullIfNotPresent,
},
{
image: "gcc@sha256:06a6f170d7fff592e44b089c0d2e68d870573eb9a23d9c66d4b6ea11f8fad18b",
policy: corev1.PullIfNotPresent,
},
{
image: "myimage:1.0",
policy: corev1.PullAlways,
},
{
image: "busybox",
policy: corev1.PullAlways,
},
{
image: "gcc@sha256:06a6f170d7fff592e44b089c0d2e68",
policy: corev1.PullIfNotPresent,
},
{
image: "hello@md5:b1946ac92492d2347c6235b4d2611184",
policy: corev1.PullIfNotPresent,
},
}
source := &v1alpha1.CatalogSource{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "test-ns",
},
}
for _, tt := range table {
p := Pod(source, "catalog", tt.image, "", nil, nil, int32(0), int32(0))
policy := p.Spec.Containers[0].ImagePullPolicy
if policy != tt.policy {
t.Fatalf("expected pull policy %s for image %s", tt.policy, tt.image)
}
}
}
func TestPodContainerSecurityContext(t *testing.T) {
expectedReadOnlyRootFilesystem := false
expectedContainerSecCtx := &corev1.SecurityContext{
ReadOnlyRootFilesystem: &expectedReadOnlyRootFilesystem,
}
catsrc := &v1alpha1.CatalogSource{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "testns",
},
}
gotPod := Pod(catsrc, "hello", "busybox", "", map[string]string{}, map[string]string{}, int32(0), int32(0))
gotContainerSecCtx := gotPod.Spec.Containers[0].SecurityContext
require.Equal(t, expectedContainerSecCtx, gotContainerSecCtx)
}
func TestPodSchedulingOverrides(t *testing.T) {
// This test ensures that any overriding pod scheduling configuration elements
// defined in spec.grpcPodConfig are applied to the catalog source pod created
// when spec.sourceType = 'grpc' and spec.image is set.
var tolerationSeconds int64 = 120
var overriddenPriorityClassName = "some-prio-class"
var overriddenNodeSelectors = map[string]string{
"label": "value",
"label2": "value2",
}
var defaultNodeSelectors = map[string]string{
"kubernetes.io/os": "linux",
}
var defaultPriorityClassName = ""
var overriddenTolerations = []corev1.Toleration{
{
Key: "some/key",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoExecute,
TolerationSeconds: &tolerationSeconds,
},
{
Key: "someother/key",
Operator: corev1.TolerationOpEqual,
Effect: corev1.TaintEffectNoSchedule,
},
}
testCases := []struct {
title string
catalogSource *v1alpha1.CatalogSource
expectedNodeSelectors map[string]string
expectedTolerations []corev1.Toleration
expectedPriorityClassName string
annotations map[string]string
}{
{
title: "no overrides",
catalogSource: &v1alpha1.CatalogSource{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "testns",
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: "repo/image:tag",
},
},
expectedTolerations: nil,
expectedPriorityClassName: defaultPriorityClassName,
expectedNodeSelectors: defaultNodeSelectors,
}, {
title: "override node selectors",
catalogSource: &v1alpha1.CatalogSource{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "testns",
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: "repo/image:tag",
GrpcPodConfig: &v1alpha1.GrpcPodConfig{
NodeSelector: overriddenNodeSelectors,
},
},
},
expectedTolerations: nil,
expectedPriorityClassName: defaultPriorityClassName,
expectedNodeSelectors: overriddenNodeSelectors,
}, {
title: "override priority class name",
catalogSource: &v1alpha1.CatalogSource{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "testns",
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: "repo/image:tag",
GrpcPodConfig: &v1alpha1.GrpcPodConfig{
PriorityClassName: &overriddenPriorityClassName,
},
},
},
expectedTolerations: nil,
expectedPriorityClassName: overriddenPriorityClassName,
expectedNodeSelectors: defaultNodeSelectors,
}, {
title: "doesn't override priority class name when its nil",
catalogSource: &v1alpha1.CatalogSource{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "testns",
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: "repo/image:tag",
GrpcPodConfig: &v1alpha1.GrpcPodConfig{
PriorityClassName: nil,
},
},
},
expectedTolerations: nil,
expectedPriorityClassName: defaultPriorityClassName,
expectedNodeSelectors: defaultNodeSelectors,
}, {
title: "Override node tolerations",
catalogSource: &v1alpha1.CatalogSource{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "testns",
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: "repo/image:tag",
GrpcPodConfig: &v1alpha1.GrpcPodConfig{
Tolerations: overriddenTolerations,
},
},
},
expectedTolerations: overriddenTolerations,
expectedPriorityClassName: defaultPriorityClassName,
expectedNodeSelectors: defaultNodeSelectors,
}, {
title: "Override all the things",
catalogSource: &v1alpha1.CatalogSource{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "testns",
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: "repo/image:tag",
GrpcPodConfig: &v1alpha1.GrpcPodConfig{
NodeSelector: overriddenNodeSelectors,
PriorityClassName: &overriddenPriorityClassName,
Tolerations: overriddenTolerations,
},
},
},
expectedTolerations: overriddenTolerations,
expectedPriorityClassName: overriddenPriorityClassName,
expectedNodeSelectors: overriddenNodeSelectors,
}, {
title: "priorityClassName annotation takes precedence",
catalogSource: &v1alpha1.CatalogSource{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "testns",
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: "repo/image:tag",
GrpcPodConfig: &v1alpha1.GrpcPodConfig{
PriorityClassName: &overriddenPriorityClassName,
},
},
},
expectedTolerations: nil,
annotations: map[string]string{
CatalogPriorityClassKey: "some-OTHER-prio-class",
},
expectedPriorityClassName: "some-OTHER-prio-class",
expectedNodeSelectors: defaultNodeSelectors,
},
}
for _, testCase := range testCases {
pod := Pod(testCase.catalogSource, "hello", "busybox", "", map[string]string{}, testCase.annotations, int32(0), int32(0))
require.Equal(t, testCase.expectedNodeSelectors, pod.Spec.NodeSelector)
require.Equal(t, testCase.expectedPriorityClassName, pod.Spec.PriorityClassName)
require.Equal(t, testCase.expectedTolerations, pod.Spec.Tolerations)
}
}
|
TestPodNodeSelector
|
updateStatusMatrix.ts
|
import * as git from "isomorphic-git"
import { StatusMatrix, StatusRow } from "../../types"
export async function updateStatusMatrix(
projectRoot: string,
matrix: StatusMatrix,
patterns: string[]
): Promise<StatusMatrix> {
// return getStagingStatus(projectRoot)
if (patterns.length === 0) {
return git.statusMatrix({ dir: projectRoot })
}
const buffer = [...matrix]
for (const pattern of patterns) {
const newMat = await git.statusMatrix({
dir: projectRoot,
pattern
})
for (const newRow of newMat) {
const [fpath] = newRow
const bufferIndex = buffer.findIndex(([f]: StatusRow) => {
return f === fpath
})
if (bufferIndex > -1) {
buffer[bufferIndex] = newRow
} else {
buffer.push(newRow)
}
}
}
|
return buffer
}
| |
cli.js
|
#!/usr/bin/env node
const fs = require('fs');
const CFonts = require('cfonts')
const args = require('../libs/args')
const prompt = require('../libs/prompt')
const Whale = require('../index')
let exchangers = require('../config/exchangers.json');
let config = require('../config/whale.json');
// deepMerge to use built-in config as defaults.
const deepMerge = function (target, source) {
for (var p in source) {
if (!target.hasOwnProperty(p)) { // set
target[p] = source[p];
} else if (target[p] instanceof Object) { // recurse
deepMerge(target[p], source[p])
}
}
return target;
}
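// e.g. deepMerge({a: 1}, {a: 2, b: 3}) yields {a: 1, b: 3}:
// keys already on the target win; missing keys are copied from the source.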
|
if (!fs.existsSync(args.config)) {
return console.error(`config file ${args.config} does not exist`);
}
// Detect combined config.
let _config = require(args.config);
if (_config.hasOwnProperty('exchangers')) {
exchangers = _config.exchangers;
}
if (_config.hasOwnProperty('whale')) {
config = deepMerge(_config.whale, config);
}
}
prompt(exchangers, args.all).then((answer) => {
CFonts.say('Whale, show Ethereum and Bitcoin price in command line interface (CLI).|Loading...', {
font: 'console',
align: 'left',
colors: ['candy'],
letterSpacing: 1,
lineHeight: 1.8,
space: true,
maxLength: '0'
})
// Init whale with selected exchange.
const exchange = exchangers[answer.exchange];
exchange.name = answer.exchange;
new Whale(config, exchange, answer.markets)
}).catch(console.error)
|
if (args.config) {
|
0012_auto_20201019_2139.py
|
# Generated by Django 3.1.1 on 2020-10-19 16:09
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
|
dependencies = [
('maps', '0011_auto_20201019_1839'),
]
operations = [
migrations.AlterField(
model_name='trafficsignal',
name='timer',
field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 21, 39, 12, 862273)),
),
]
|
|
requirements_test.go
|
package main
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/ellcrys/docker/api/types"
"github.com/ellcrys/docker/api/types/swarm"
"github.com/ellcrys/docker/api/types/versions"
"github.com/ellcrys/docker/client"
"github.com/ellcrys/docker/integration-cli/requirement"
"github.com/ellcrys/docker/internal/test/registry"
)
func ArchitectureIsNot(arch string) bool {
return os.Getenv("DOCKER_ENGINE_GOARCH") != arch
}
func DaemonIsWindows() bool {
return testEnv.OSType == "windows"
}
func DaemonIsWindowsAtLeastBuild(buildNumber int) func() bool {
return func() bool {
if testEnv.OSType != "windows" {
return false
}
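		// On Windows, KernelVersion looks like "10.0 14393 (...)"; the build
		// number is the token after the first space (assumption about format).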
version := testEnv.DaemonInfo.KernelVersion
numVersion, _ := strconv.Atoi(strings.Split(version, " ")[1])
return numVersion >= buildNumber
}
}
func DaemonIsLinux() bool {
return testEnv.OSType == "linux"
}
func MinimumAPIVersion(version string) func() bool {
return func() bool {
return versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), version)
}
}
func OnlyDefaultNetworks() bool {
cli, err := client.NewEnvClient()
if err != nil {
return false
}
networks, err := cli.NetworkList(context.TODO(), types.NetworkListOptions{})
if err != nil || len(networks) > 0 {
return false
}
return true
}
// Deprecated: use skip.IfCondition(t, !testEnv.DaemonInfo.ExperimentalBuild)
func ExperimentalDaemon() bool {
return testEnv.DaemonInfo.ExperimentalBuild
}
func IsAmd64() bool {
return os.Getenv("DOCKER_ENGINE_GOARCH") == "amd64"
}
func NotArm() bool {
return ArchitectureIsNot("arm")
}
func NotArm64() bool {
return ArchitectureIsNot("arm64")
}
func NotPpc64le() bool {
return ArchitectureIsNot("ppc64le")
}
func NotS390X() bool {
return ArchitectureIsNot("s390x")
}
func SameHostDaemon() bool {
return testEnv.IsLocalDaemon()
}
func UnixCli() bool {
return isUnixCli
}
func ExecSupport() bool {
return supportsExec
}
func Network() bool {
// Set a timeout on the GET at 15s
	var timeout = 15 * time.Second
var url = "https://hub.docker.com"
client := http.Client{
Timeout: timeout,
}
resp, err := client.Get(url)
if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
panic(fmt.Sprintf("Timeout for GET request on %s", url))
}
if resp != nil {
resp.Body.Close()
}
return err == nil
}
func Apparmor() bool {
if strings.HasPrefix(testEnv.DaemonInfo.OperatingSystem, "SUSE Linux Enterprise Server ") {
return false
}
buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
return err == nil && len(buf) > 1 && buf[0] == 'Y'
}
func Devicemapper() bool {
return strings.HasPrefix(testEnv.DaemonInfo.Driver, "devicemapper")
}
func IPv6() bool {
cmd := exec.Command("test", "-f", "/proc/net/if_inet6")
return cmd.Run() != nil
}
func UserNamespaceROMount() bool {
// quick case--userns not enabled in this test run
if os.Getenv("DOCKER_REMAP_ROOT") == "" {
return true
}
if _, _, err := dockerCmdWithError("run", "--rm", "--read-only", "busybox", "date"); err != nil {
return false
}
return true
}
func
|
() bool {
root := os.Getenv("DOCKER_REMAP_ROOT")
return root == ""
}
func UserNamespaceInKernel() bool {
if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) {
/*
* This kernel-provided file only exists if user namespaces are
* supported
*/
return false
}
// We need extra check on redhat based distributions
if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil {
defer f.Close()
b := make([]byte, 1)
_, _ = f.Read(b)
return string(b) != "N"
}
return true
}
func IsPausable() bool {
if testEnv.OSType == "windows" {
return testEnv.DaemonInfo.Isolation == "hyperv"
}
return true
}
func NotPausable() bool {
if testEnv.OSType == "windows" {
return testEnv.DaemonInfo.Isolation == "process"
}
return false
}
func IsolationIs(expectedIsolation string) bool {
return testEnv.OSType == "windows" && string(testEnv.DaemonInfo.Isolation) == expectedIsolation
}
func IsolationIsHyperv() bool {
return IsolationIs("hyperv")
}
func IsolationIsProcess() bool {
return IsolationIs("process")
}
// RegistryHosting returns whether the host can host a registry (v2) or not
func RegistryHosting() bool {
// for now registry binary is built only if we're running inside
// container through `make test`. Figure that out by testing if
// registry binary is in PATH.
_, err := exec.LookPath(registry.V2binary)
return err == nil
}
func SwarmInactive() bool {
return testEnv.DaemonInfo.Swarm.LocalNodeState == swarm.LocalNodeStateInactive
}
// testRequires checks if the environment satisfies the requirements
// for the test to run or skips the tests.
func testRequires(c requirement.SkipT, requirements ...requirement.Test) {
requirement.Is(c, requirements...)
}
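// A minimal usage sketch of testRequires (hypothetical test; the suite type
// and the check.C value are assumptions, not part of this file):
//
//	func (s *DockerSuite) TestRunOnLinux(c *check.C) {
//		testRequires(c, DaemonIsLinux, Network)
//		// the body only runs when the daemon is Linux and the hub is reachable
//	}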
|
NotUserNamespace
|
input.py
|
"""Utilities for input pipelines."""
import tensorflow as tf
def
|
(tensors,
capacity=32,
min_after_dequeue=16,
num_threads=1,
dtypes=None,
shapes=None,
seed=None,
shared_name=None,
name='shuffle'):
"""Wrapper around a `tf.RandomShuffleQueue` creation.
Return a dequeue op that dequeues elements from `tensors` in a
random order, through a `tf.RandomShuffleQueue` -- see for further
documentation.
Arguments:
tensors: an iterable of tensors.
capacity: (Optional) the capacity of the queue; default value set to 32.
      num_threads: (Optional) the number of threads to be used for the queue runner;
        default value set to 1.
      min_after_dequeue: (Optional) minimum number of elements to remain in the
        queue after a `dequeue` or `dequeue_many` has been performed,
        in order to ensure better mixing of elements; default value set to 16.
dtypes: (Optional) list of `DType` objects, one for each tensor in `tensors`;
if not provided, will be inferred from `tensors`.
shapes: (Optional) list of shapes, one for each tensor in `tensors`.
seed: (Optional) seed for random shuffling.
shared_name: (Optional) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name scope for the ops.
Returns:
The tuple of tensors that was randomly dequeued from `tensors`.
"""
tensors = list(tensors)
with tf.name_scope(name, values=tensors):
dtypes = dtypes or list([t.dtype for t in tensors])
queue = tf.RandomShuffleQueue(
seed=seed,
shared_name=shared_name,
name='random_shuffle_queue',
dtypes=dtypes,
shapes=shapes,
capacity=capacity,
min_after_dequeue=min_after_dequeue)
enqueue = queue.enqueue(tensors)
runner = tf.train.QueueRunner(queue, [enqueue] * num_threads)
tf.train.add_queue_runner(runner)
dequeue = queue.dequeue()
return dequeue
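# A minimal usage sketch for `shuffle` (TF 1.x queue-runner style; the tensor
# names below are illustrative assumptions, not part of this module):
#
#   image = tf.random_normal([28, 28])
#   label = tf.random_uniform([], maxval=10, dtype=tf.int32)
#   deq_image, deq_label = shuffle([image, label], capacity=64, seed=23)
#   with tf.Session() as sess:
#       coord = tf.train.Coordinator()
#       threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#       img, lbl = sess.run([deq_image, deq_label])
#       coord.request_stop()
#       coord.join(threads)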
def shuffle_batch(tensors,
batch_size,
capacity=32,
num_threads=1,
min_after_dequeue=16,
dtypes=None,
shapes=None,
seed=None,
enqueue_many=False,
dynamic_pad=True,
allow_smaller_final_batch=False,
shared_name=None,
name='shuffle_batch'):
"""Create shuffled and padded batches of tensors in `tensors`.
    Dequeue elements from `tensors`, shuffling, batching and dynamically
padding them. First a `tf.RandomShuffleQueue` is created and fed with
`tensors` (using the `dket.input.shuffle` function); the dequeued tensors
shapes are then set and fed into a `tf.train.batch` function that provides
batching and dynamic padding.
Arguments:
tensors: an iterable of tensors.
      batch_size: an `int` representing the batch size.
capacity: (Optional) the capacity of the queues; default value set to 32.
      num_threads: (Optional) the number of threads to be used for the queue runner;
        default value set to 1.
      min_after_dequeue: (Optional) minimum number of elements to remain in the
        shuffling queue after a `dequeue` or `dequeue_many` has been performed,
        in order to ensure better mixing of elements; default value set to 16.
dtypes: (Optional) list of `DType` objects, one for each tensor in `tensors`;
if not provided, will be inferred from `tensors`.
shapes: (Optional) list of shapes, one for each tensor in `tensors`.
seed: (Optional) seed for random shuffling.
enqueue_many: Whether each tensor in tensors is a single example.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within
a batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If True, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: if set, the queues will be shared under the given name
across different sessions.
name: scope name for the given ops.
Returns:
A batch of tensors from `tensors`, shuffled and padded.
"""
tensors = list(tensors)
with tf.name_scope(name, values=tensors):
dtypes = dtypes or list([t.dtype for t in tensors])
shapes = shapes or list([t.get_shape() for t in tensors])
inputs = shuffle(tensors,
seed=seed,
dtypes=dtypes,
capacity=capacity,
num_threads=num_threads,
min_after_dequeue=min_after_dequeue,
shared_name=shared_name,
name='shuffle')
# fix the shapes
for tensor, shape in zip(inputs, shapes):
tensor.set_shape(shape)
minibatch = tf.train.batch(
tensors=inputs,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
enqueue_many=enqueue_many,
name='batch')
return minibatch
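# A minimal usage sketch for `shuffle_batch` (TF 1.x; the fixed-length inputs
# below are illustrative assumptions, padded per batch via `dynamic_pad=True`):
#
#   src = tf.random_uniform([10], maxval=100, dtype=tf.int32)
#   tgt = tf.random_uniform([12], maxval=100, dtype=tf.int32)
#   batch_src, batch_tgt = shuffle_batch([src, tgt], batch_size=32)
#   # after starting the queue runners, each sess.run([batch_src, batch_tgt])
#   # yields arrays of shape [32, 10] and [32, 12]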
|
shuffle
|
canvas.py
|
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
#from PyQt4.QtOpenGL import *
from libs.shape import Shape
from libs.lib import distance
CURSOR_DEFAULT = Qt.ArrowCursor
CURSOR_POINT = Qt.PointingHandCursor
CURSOR_DRAW = Qt.CrossCursor
CURSOR_MOVE = Qt.ClosedHandCursor
CURSOR_GRAB = Qt.OpenHandCursor
# class Canvas(QGLWidget):
class Canvas(QWidget):
zoomRequest = pyqtSignal(int)
scrollRequest = pyqtSignal(int, int)
newShape = pyqtSignal()
selectionChanged = pyqtSignal(bool)
shapeMoved = pyqtSignal()
drawingPolygon = pyqtSignal(bool)
CREATE, EDIT = list(range(2))
epsilon = 11.0
def __init__(self, *args, **kwargs):
super(Canvas, self).__init__(*args, **kwargs)
# Initialise local state.
self.mode = self.EDIT
self.shapes = []
self.current = None
self.selectedShape = None # save the selected shape here
self.selectedShapeCopy = None
self.drawingLineColor = QColor(0, 0, 255)
self.drawingRectColor = QColor(0, 0, 255)
self.line = Shape(line_color=self.drawingLineColor)
self.prevPoint = QPointF()
self.offsets = QPointF(), QPointF()
self.scale = 1.0
self.pixmap = QPixmap()
self.visible = {}
self._hideBackround = False
self.hideBackround = False
self.hShape = None
self.hVertex = None
self._painter = QPainter()
self._cursor = CURSOR_DEFAULT
# Menus:
self.menus = (QMenu(), QMenu())
# Set widget options.
self.setMouseTracking(True) # allow mouseMoveEvent()
self.setFocusPolicy(Qt.WheelFocus)
self.verified = False
def setDrawingColor(self, qColor):
self.drawingLineColor = qColor
self.drawingRectColor = qColor
def enterEvent(self, ev):
self.overrideCursor(self._cursor)
def leaveEvent(self, ev):
self.restoreCursor()
def focusOutEvent(self, ev):
self.restoreCursor()
def isVisible(self, shape):
return self.visible.get(shape, True) # if not found, return True
def drawing(self):
return self.mode == self.CREATE
def editing(self):
return self.mode == self.EDIT
def setEditing(self, value=True):
self.mode = self.EDIT if value else self.CREATE
if not value: # Create
self.unHighlight()
self.deSelectShape()
self.prevPoint = QPointF()
self.repaint()
def unHighlight(self):
if self.hShape:
self.hShape.highlightClear()
self.hVertex = self.hShape = None
def selectedVertex(self):
return self.hVertex is not None
def mouseMoveEvent(self, ev):
"""Update line with last point and current coordinates."""
pos = self.transformPos(ev.pos())
# Update coordinates in status bar if image is opened
window = self.parent().window()
if window.filePath is not None:
self.parent().window().labelCoordinates.setText(
'X: %d; Y: %d' % (pos.x(), pos.y()))
# Polygon drawing.
if self.drawing():
self.overrideCursor(CURSOR_DRAW)
if self.current:
color = self.drawingLineColor
if self.outOfPixmap(pos):
# Don't allow the user to draw outside the pixmap.
# Project the point to the pixmap's edges.
pos = self.intersectionPoint(self.current[-1], pos)
elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]):
# Attract line to starting point and colorise to alert the
# user:
pos = self.current[0]
color = self.current.line_color
self.overrideCursor(CURSOR_POINT)
self.current.highlightVertex(0, Shape.NEAR_VERTEX)
self.line[1] = pos
self.line.line_color = color
self.prevPoint = QPointF()
self.current.highlightClear()
else:
self.prevPoint = pos
self.repaint()
return
# Polygon copy moving.
if Qt.RightButton & ev.buttons():
if self.selectedShapeCopy and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShape(self.selectedShapeCopy, pos)
self.repaint()
elif self.selectedShape:
self.selectedShapeCopy = self.selectedShape.copy()
self.repaint()
return
# Polygon/Vertex moving.
if Qt.LeftButton & ev.buttons():
if self.selectedVertex():
self.boundedMoveVertex(pos)
self.shapeMoved.emit()
self.repaint()
elif self.selectedShape and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShape(self.selectedShape, pos)
self.shapeMoved.emit()
self.repaint()
return
        # Just hovering over the canvas, two possibilities:
# - Highlight shapes
# - Highlight vertex
# Update shape/vertex fill and tooltip value accordingly.
self.setToolTip("Image")
for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
# Look for a nearby vertex to highlight. If that fails,
# check if we happen to be inside a shape.
index = shape.nearestVertex(pos, self.epsilon)
if index is not None:
if self.selectedVertex():
self.hShape.highlightClear()
self.hVertex, self.hShape = index, shape
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.overrideCursor(CURSOR_POINT)
self.setToolTip("Click & drag to move point")
self.setStatusTip(self.toolTip())
self.update()
break
elif shape.containsPoint(pos):
if self.selectedVertex():
self.hShape.highlightClear()
self.hVertex, self.hShape = None, shape
self.setToolTip(
"Click & drag to move shape '%s'" % shape.label)
self.setStatusTip(self.toolTip())
self.overrideCursor(CURSOR_GRAB)
self.update()
break
else: # Nothing found, clear highlights, reset state.
if self.hShape:
self.hShape.highlightClear()
self.update()
self.hVertex, self.hShape = None, None
self.overrideCursor(CURSOR_DEFAULT)
def mousePressEvent(self, ev):
pos = self.transformPos(ev.pos())
if ev.button() == Qt.LeftButton:
if self.drawing():
self.handleDrawing(pos)
else:
self.selectShapePoint(pos)
self.prevPoint = pos
self.repaint()
elif ev.button() == Qt.RightButton and self.editing():
self.selectShapePoint(pos)
self.prevPoint = pos
self.repaint()
def mouseReleaseEvent(self, ev):
if ev.button() == Qt.RightButton:
menu = self.menus[bool(self.selectedShapeCopy)]
self.restoreCursor()
if not menu.exec_(self.mapToGlobal(ev.pos()))\
and self.selectedShapeCopy:
# Cancel the move by deleting the shadow copy.
self.selectedShapeCopy = None
self.repaint()
elif ev.button() == Qt.LeftButton and self.selectedShape:
if self.selectedVertex():
self.overrideCursor(CURSOR_POINT)
else:
self.overrideCursor(CURSOR_GRAB)
elif ev.button() == Qt.LeftButton:
pos = self.transformPos(ev.pos())
if self.drawing():
self.handleDrawing(pos)
def endMove(self, copy=False):
        # If copy is True, shift the selected shape and keep both shapes;
        # otherwise just move: shift the selected shape and drop the original.
assert self.selectedShape and self.selectedShapeCopy
shape = self.selectedShapeCopy
#del shape.fill_color
#del shape.line_color
if copy:
self.shapes.append(shape)
self.selectedShape.selected = False
self.selectedShape = shape
self.repaint()
else: # just move
self.selectedShape.points = [p for p in shape.points]
self.selectedShapeCopy = None
def hideBackroundShapes(self, value):
self.hideBackround = value
if self.selectedShape:
# Only hide other shapes if there is a current selection.
# Otherwise the user will not be able to select a shape.
self.setHiding(True)
self.repaint()
def handleDrawing(self, pos):
if self.current and (self.current.reachMaxPoints() is False):
initPos = self.current[0]
minX = initPos.x()
minY = initPos.y()
targetPos = self.line[1]
maxX = targetPos.x()
maxY = targetPos.y()
self.current.addPoint(QPointF(maxX, minY))
self.current.addPoint(targetPos)
self.current.addPoint(QPointF(minX, maxY))
self.finalise()
elif not self.outOfPixmap(pos):
self.current = Shape()
self.current.addPoint(pos)
self.line.points = [pos, pos]
self.setHiding()
self.drawingPolygon.emit(True)
self.update()
def setHiding(self, enable=True):
self._hideBackround = self.hideBackround if enable else False
def canCloseShape(self):
return self.drawing() and self.current and len(self.current) > 2
def mouseDoubleClickEvent(self, ev):
# We need at least 4 points here, since the mousePress handler
# adds an extra one before this handler is called.
if self.canCloseShape() and len(self.current) > 3:
self.current.popPoint()
self.finalise()
def selectShape(self, shape):
self.deSelectShape()
shape.selected = True
self.selectedShape = shape
self.setHiding()
self.selectionChanged.emit(True)
self.update()
def selectShapePoint(self, point):
"""Select the first shape created which contains this point."""
self.deSelectShape()
if self.selectedVertex(): # A vertex is marked for selection.
index, shape = self.hVertex, self.hShape
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.selectShape(shape)
return
for shape in reversed(self.shapes):
if self.isVisible(shape) and shape.containsPoint(point):
self.selectShape(shape)
self.calculateOffsets(shape, point)
return
def calculateOffsets(self, shape, point):
rect = shape.boundingRect()
x1 = rect.x() - point.x()
y1 = rect.y() - point.y()
x2 = (rect.x() + rect.width()) - point.x()
y2 = (rect.y() + rect.height()) - point.y()
self.offsets = QPointF(x1, y1), QPointF(x2, y2)
def boundedMoveVertex(self, pos):
index, shape = self.hVertex, self.hShape
point = shape[index]
if self.outOfPixmap(pos):
pos = self.intersectionPoint(point, pos)
shiftPos = pos - point
shape.moveVertexBy(index, shiftPos)
lindex = (index + 1) % 4
rindex = (index + 3) % 4
lshift = None
rshift = None
if index % 2 == 0:
rshift = QPointF(shiftPos.x(), 0)
lshift = QPointF(0, shiftPos.y())
else:
lshift = QPointF(shiftPos.x(), 0)
rshift = QPointF(0, shiftPos.y())
shape.moveVertexBy(rindex, rshift)
shape.moveVertexBy(lindex, lshift)
def boundedMoveShape(self, shape, pos):
if self.outOfPixmap(pos):
return False # No need to move
o1 = pos + self.offsets[0]
if self.outOfPixmap(o1):
pos -= QPointF(min(0, o1.x()), min(0, o1.y()))
o2 = pos + self.offsets[1]
if self.outOfPixmap(o2):
pos += QPointF(min(0, self.pixmap.width() - o2.x()),
min(0, self.pixmap.height() - o2.y()))
# The next line tracks the new position of the cursor
# relative to the shape, but also results in making it
# a bit "shaky" when nearing the border and allows it to
# go outside of the shape's area for some reason. XXX
#self.calculateOffsets(self.selectedShape, pos)
dp = pos - self.prevPoint
if dp:
shape.moveBy(dp)
self.prevPoint = pos
return True
return False
def deSelectShape(self):
if self.selectedShape:
self.selectedShape.selected = False
self.selectedShape = None
self.setHiding(False)
self.selectionChanged.emit(False)
self.update()
def deleteSelected(self):
if self.selectedShape:
shape = self.selectedShape
self.shapes.remove(self.selectedShape)
self.selectedShape = None
self.update()
return shape
def copySelectedShape(self):
if self.selectedShape:
shape = self.selectedShape.copy()
self.deSelectShape()
self.shapes.append(shape)
shape.selected = True
self.selectedShape = shape
self.boundedShiftShape(shape)
return shape
def boundedShiftShape(self, shape):
# Try to move in one direction, and if it fails in another.
# Give up if both fail.
point = shape[0]
offset = QPointF(2.0, 2.0)
self.calculateOffsets(shape, point)
self.prevPoint = point
if not self.boundedMoveShape(shape, point - offset):
self.boundedMoveShape(shape, point + offset)
def paintEvent(self, event):
if not self.pixmap:
return super(Canvas, self).paintEvent(event)
p = self._painter
p.begin(self)
p.setRenderHint(QPainter.Antialiasing)
p.setRenderHint(QPainter.HighQualityAntialiasing)
p.setRenderHint(QPainter.SmoothPixmapTransform)
p.scale(self.scale, self.scale)
p.translate(self.offsetToCenter())
p.drawPixmap(0, 0, self.pixmap)
Shape.scale = self.scale
for shape in self.shapes:
if (shape.selected or not self._hideBackround) and self.isVisible(shape):
shape.fill = shape.selected or shape == self.hShape
shape.paint(p)
if self.current:
self.current.paint(p)
self.line.paint(p)
if self.selectedShapeCopy:
self.selectedShapeCopy.paint(p)
# Paint rect
if self.current is not None and len(self.line) == 2:
leftTop = self.line[0]
rightBottom = self.line[1]
rectWidth = rightBottom.x() - leftTop.x()
rectHeight = rightBottom.y() - leftTop.y()
p.setPen(self.drawingRectColor)
brush = QBrush(Qt.BDiagPattern)
p.setBrush(brush)
p.drawRect(leftTop.x(), leftTop.y(), rectWidth, rectHeight)
if self.drawing() and not self.prevPoint.isNull() and not self.outOfPixmap(self.prevPoint):
p.setPen(QColor(0, 0, 0))
p.drawLine(self.prevPoint.x(), 0, self.prevPoint.x(), self.pixmap.height())
p.drawLine(0, self.prevPoint.y(), self.pixmap.width(), self.prevPoint.y())
self.setAutoFillBackground(True)
if self.verified:
pal = self.palette()
pal.setColor(self.backgroundRole(), QColor(184, 239, 38, 128))
self.setPalette(pal)
else:
pal = self.palette()
pal.setColor(self.backgroundRole(), QColor(232, 232, 232, 255))
self.setPalette(pal)
p.end()
def transformPos(self, point):
"""Convert from widget-logical coordinates to painter-logical coordinates."""
return point / self.scale - self.offsetToCenter()
def offsetToCenter(self):
s = self.scale
area = super(Canvas, self).size()
w, h = self.pixmap.width() * s, self.pixmap.height() * s
aw, ah = area.width(), area.height()
x = (aw - w) / (2 * s) if aw > w else 0
y = (ah - h) / (2 * s) if ah > h else 0
return QPointF(x, y)
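    # Worked example (illustrative numbers): with scale=2.0, a 100x100 pixmap
    # and a 300x300 widget, offsetToCenter() is (25, 25), so the widget point
    # (250, 250) maps via transformPos to 250/2 - 25 = (100, 100), i.e. the
    # pixmap's bottom-right corner in painter coordinates.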
def outOfPixmap(self, p):
w, h = self.pixmap.width(), self.pixmap.height()
return not (0 <= p.x() <= w and 0 <= p.y() <= h)
def finalise(self):
assert self.current
if self.current.points[0] == self.current.points[-1]:
self.current = None
self.drawingPolygon.emit(False)
self.update()
return
self.current.close()
self.shapes.append(self.current)
self.current = None
self.setHiding(False)
self.newShape.emit()
self.update()
def closeEnough(self, p1, p2):
#d = distance(p1 - p2)
#m = (p1-p2).manhattanLength()
# print "d %.2f, m %d, %.2f" % (d, m, d - m)
return distance(p1 - p2) < self.epsilon
def intersectionPoint(self, p1, p2):
# Cycle through each image edge in clockwise fashion,
# and find the one intersecting the current line segment.
# http://paulbourke.net/geometry/lineline2d/
size = self.pixmap.size()
points = [(0, 0),
(size.width(), 0),
(size.width(), size.height()),
(0, size.height())]
x1, y1 = p1.x(), p1.y()
x2, y2 = p2.x(), p2.y()
d, i, (x, y) = min(self.intersectingEdges((x1, y1), (x2, y2), points))
x3, y3 = points[i]
x4, y4 = points[(i + 1) % 4]
if (x, y) == (x1, y1):
# Handle cases where previous point is on one of the edges.
if x3 == x4:
return QPointF(x3, min(max(0, y2), max(y3, y4)))
else: # y3 == y4
return QPointF(min(max(0, x2), max(x3, x4)), y3)
return QPointF(x, y)
def intersectingEdges(self, x1y1, x2y2, points):
"""For each edge formed by `points', yield the intersection
with the line segment `(x1,y1) - (x2,y2)`, if it exists.
Also return the distance of `(x2,y2)' to the middle of the
edge along with its index, so that the one closest can be chosen."""
x1, y1 = x1y1
x2, y2 = x2y2
for i in range(4):
x3, y3 = points[i]
x4, y4 = points[(i + 1) % 4]
denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
if denom == 0:
# This covers two cases:
# nua == nub == 0: Coincident
# otherwise: Parallel
continue
ua, ub = nua / denom, nub / denom
if 0 <= ua <= 1 and 0 <= ub <= 1:
x = x1 + ua * (x2 - x1)
y = y1 + ua * (y2 - y1)
m = QPointF((x3 + x4) / 2, (y3 + y4) / 2)
d = distance(m - QPointF(x2, y2))
yield d, i, (x, y)
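    # Worked example (illustrative): for a 100x100 pixmap, the segment
    # (50, 50) -> (150, 50) meets the right edge (100, 0)-(100, 100) at
    # ua = ub = 0.5, so the clipped point returned upstream is (100, 50).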
# These two, along with a call to adjustSize are required for the
# scroll area.
def sizeHint(self):
return self.minimumSizeHint()
def minimumSizeHint(self):
if self.pixmap:
return self.scale * self.pixmap.size()
return super(Canvas, self).minimumSizeHint()
def wheelEvent(self, ev):
qt_version = 4 if hasattr(ev, "delta") else 5
if qt_version == 4:
if ev.orientation() == Qt.Vertical:
v_delta = ev.delta()
h_delta = 0
else:
h_delta = ev.delta()
v_delta = 0
else:
delta = ev.angleDelta()
h_delta = delta.x()
v_delta = delta.y()
mods = ev.modifiers()
if Qt.ControlModifier == int(mods) and v_delta:
self.zoomRequest.emit(v_delta)
else:
v_delta and self.scrollRequest.emit(v_delta, Qt.Vertical)
h_delta and self.scrollRequest.emit(h_delta, Qt.Horizontal)
ev.accept()
def keyPressEvent(self, ev):
key = ev.key()
if key == Qt.Key_Escape and self.current:
print('ESC press')
self.current = None
self.drawingPolygon.emit(False)
self.update()
elif key == Qt.Key_Return and self.canCloseShape():
self.finalise()
elif key == Qt.Key_Left and self.selectedShape:
self.moveOnePixel('Left')
elif key == Qt.Key_Right and self.selectedShape:
self.moveOnePixel('Right')
elif key == Qt.Key_Up and self.selectedShape:
self.moveOnePixel('Up')
elif key == Qt.Key_Down and self.selectedShape:
self.moveOnePixel('Down')
def moveOnePixel(self, direction):
# print(self.selectedShape.points)
if direction == 'Left' and not self.moveOutOfBound(QPointF(-1.0, 0)):
# print("move Left one pixel")
self.selectedShape.points[0] += QPointF(-1.0, 0)
self.selectedShape.points[1] += QPointF(-1.0, 0)
self.selectedShape.points[2] += QPointF(-1.0, 0)
self.selectedShape.points[3] += QPointF(-1.0, 0)
elif direction == 'Right' and not self.moveOutOfBound(QPointF(1.0, 0)):
# print("move Right one pixel")
self.selectedShape.points[0] += QPointF(1.0, 0)
self.selectedShape.points[1] += QPointF(1.0, 0)
self.selectedShape.points[2] += QPointF(1.0, 0)
self.selectedShape.points[3] += QPointF(1.0, 0)
elif direction == 'Up' and not self.moveOutOfBound(QPointF(0, -1.0)):
# print("move Up one pixel")
self.selectedShape.points[0] += QPointF(0, -1.0)
self.selectedShape.points[1] += QPointF(0, -1.0)
self.selectedShape.points[2] += QPointF(0, -1.0)
self.selectedShape.points[3] += QPointF(0, -1.0)
elif direction == 'Down' and not self.moveOutOfBound(QPointF(0, 1.0)):
# print("move Down one pixel")
self.selectedShape.points[0] += QPointF(0, 1.0)
self.selectedShape.points[1] += QPointF(0, 1.0)
self.selectedShape.points[2] += QPointF(0, 1.0)
self.selectedShape.points[3] += QPointF(0, 1.0)
self.shapeMoved.emit()
self.repaint()
def moveOutOfBound(self, step):
points = [p1+p2 for p1, p2 in zip(self.selectedShape.points, [step]*4)]
return True in map(self.outOfPixmap, points)
def setLastLabel(self, text, line_color = None, fill_color = None):
assert text
self.shapes[-1].label = text
if line_color:
self.shapes[-1].line_color = line_color
if fill_color:
self.shapes[-1].fill_color = fill_color
return self.shapes[-1]
def undoLastLine(self):
assert self.shapes
self.current = self.shapes.pop()
self.current.setOpen()
self.line.points = [self.current[-1], self.current[0]]
self.drawingPolygon.emit(True)
def
|
(self):
assert self.shapes
self.current = self.shapes.pop()
self.current.setOpen()
self.line.points = [self.current[-1], self.current[0]]
self.drawingPolygon.emit(True)
self.current = None
self.drawingPolygon.emit(False)
self.update()
def loadPixmap(self, pixmap):
self.pixmap = pixmap
self.shapes = []
self.repaint()
def loadShapes(self, shapes):
self.shapes = list(shapes)
self.current = None
self.repaint()
def setShapeVisible(self, shape, value):
self.visible[shape] = value
self.repaint()
def currentCursor(self):
cursor = QApplication.overrideCursor()
if cursor is not None:
cursor = cursor.shape()
return cursor
def overrideCursor(self, cursor):
self._cursor = cursor
if self.currentCursor() is None:
QApplication.setOverrideCursor(cursor)
else:
QApplication.changeOverrideCursor(cursor)
def restoreCursor(self):
QApplication.restoreOverrideCursor()
def resetState(self):
self.restoreCursor()
self.pixmap = None
self.update()
|
resetAllLines
|
core.py
|
"""
This module provides the functionality to create the temporal
SQL database and to establish a connection to the database.
Usage:
.. code-block:: python
>>> import grass.temporal as tgis
>>> # Create the temporal database
>>> tgis.init()
>>> # Establish a database connection
>>> dbif, connected = tgis.init_dbif(None)
>>> dbif.connect()
>>> # Execute a SQL statement
>>> dbif.execute_transaction("SELECT datetime(0, 'unixepoch', 'localtime');")
>>> # Mogrify an SQL statement
>>> dbif.mogrify_sql_statement(["SELECT name from raster_base where name = ?",
... ("precipitation",)])
"SELECT name from raster_base where name = 'precipitation'"
>>> dbif.close()
(C) 2011-2014 by the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
:author: Soeren Gebbert
"""
#import traceback
import os
import sys
import grass.script as gscript
if sys.version_info.major == 3:
long = int
from .c_libraries_interface import *
from grass.pygrass import messages
from grass.script.utils import decode, encode
# Import all supported database backends
# Ignore import errors since they are checked later
try:
import sqlite3
except ImportError:
pass
# Postgresql is optional, existence is checked when needed
try:
import psycopg2
import psycopg2.extras
except ImportError:
pass
import atexit
from datetime import datetime
###############################################################################
def profile_function(func):
"""Profiling function provided by the temporal framework"""
do_profiling = os.getenv("GRASS_TGIS_PROFILE")
if do_profiling == "True" or do_profiling == "1":
import cProfile, pstats
try:
import StringIO as io
except ImportError:
import io
pr = cProfile.Profile()
pr.enable()
func()
pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
else:
func()
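# A usage sketch for profile_function (the wrapped callable is an
# illustrative assumption; profiling is toggled by the environment
# variable checked above):
#
#   os.environ["GRASS_TGIS_PROFILE"] = "1"
#   profile_function(lambda: get_tgis_version())
#   # prints cumulative cProfile statistics for the wrapped call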
# Global variable that defines the backend
# of the temporal GIS
# It can either be "sqlite" or "pg"
tgis_backend = None
def get_tgis_backend():
"""Return the temporal GIS backend as string
:returns: either "sqlite" or "pg"
"""
global tgis_backend
return tgis_backend
# Global variable that defines the database string
# of the temporal GIS
tgis_database = None
def get_tgis_database():
"""Return the temporal database string specified with t.connect
"""
global tgis_database
return tgis_database
# The version of the temporal framework
# this value must be an integer larger than 0
# Increase this value in case of backward incompatible changes in the TGIS API
tgis_version = 2
# The version of the temporal database since framework and database version
# can differ this value must be an integer larger than 0
# Increase this value in case of backward incompatible changes
# temporal database SQL layout
tgis_db_version = 2
# We need to know the parameter style of the database backend
tgis_dbmi_paramstyle = None
def get_tgis_dbmi_paramstyle():
"""Return the temporal database backend parameter style
:returns: "qmark" or ""
"""
global tgis_dbmi_paramstyle
return tgis_dbmi_paramstyle
# We need to access the current mapset quite often in the framework, so we make
# a global variable that will be initiated when init() is called
current_mapset = None
current_location = None
current_gisdbase = None
###############################################################################
def get_current_mapset():
"""Return the current mapset
This is the fastest way to receive the current mapset.
The current mapset is set by init() and stored in a global variable.
This function provides access to this global variable.
"""
global current_mapset
return current_mapset
###############################################################################
def get_current_location():
"""Return the current location
This is the fastest way to receive the current location.
The current location is set by init() and stored in a global variable.
This function provides access to this global variable.
"""
global current_location
return current_location
###############################################################################
def get_current_gisdbase():
"""Return the current gis database (gisdbase)
This is the fastest way to receive the current gisdbase.
The current gisdbase is set by init() and stored in a global variable.
This function provides access to this global variable.
"""
global current_gisdbase
return current_gisdbase
###############################################################################
# If this global variable is set True, then maps can only be registered in
# space time datasets with the same mapset. In addition, only maps in the
# current mapset can be inserted, updated or deleted from the temporal database.
# Overwrite this global variable by: g.gisenv set="TGIS_DISABLE_MAPSET_CHECK=True"
# ATTENTION: You may end up with a corrupted temporal database if this global
# variable is set to False. This feature is highly
# experimental and violates the GRASS permission guidance.
enable_mapset_check = True
# If this global variable is set True, the timestamps of maps will be written
# as textfiles for each map that will be inserted or updated in the temporal
# database using the C-library timestamp interface.
# Overwrite this global variable by: g.gisenv set="TGIS_DISABLE_TIMESTAMP_WRITE=True"
# ATTENTION: You may end up with a corrupted temporal database if this global
# variable is set to False. This feature is highly
# experimental and violates the GRASS permission guidance.
enable_timestamp_write = True
def get_enable_mapset_check():
"""Return True if the mapsets should be checked while insert, update,
delete requests and space time dataset registration.
If this global variable is set True, then maps can only be registered
in space time datasets with the same mapset. In addition, only maps in
the current mapset can be inserted, updated or deleted from the temporal
database.
Overwrite this global variable by: g.gisenv set="TGIS_DISABLE_MAPSET_CHECK=True"
    .. warning::
        You may end up with a corrupted temporal database if this
        global variable is set to False. This feature is highly
        experimental and violates the GRASS permission guidance.
"""
global enable_mapset_check
return enable_mapset_check
def get_enable_timestamp_write():
"""Return True if the map timestamps should be written to the spatial
database metadata as well.
If this global variable is set True, the timestamps of maps will be
written as textfiles for each map that will be inserted or updated in
the temporal database using the C-library timestamp interface.
Overwrite this global variable by: g.gisenv set="TGIS_DISABLE_TIMESTAMP_WRITE=True"
    .. warning::
        Be aware that C-libraries cannot access timestamp information if
they are not written as spatial database metadata, hence modules
that make use of timestamps using the C-library interface will not
work with maps that were created without writing the timestamps.
"""
global enable_timestamp_write
return enable_timestamp_write
###############################################################################
# The global variable that stores the PyGRASS Messenger object that
# provides a fast and exit safe interface to the C-library message functions
message_interface = None
def _init_tgis_message_interface(raise_on_error=False):
"""Initiate the global message interface
:param raise_on_error: If True raise a FatalError exception in case of
a fatal error, call sys.exit(1) otherwise
"""
global message_interface
if message_interface is None:
message_interface = messages.get_msgr(raise_on_error=raise_on_error)
def get_tgis_message_interface():
"""Return the temporal GIS message interface which is of type
grass.pygrass.message.Messenger()
Use this message interface to print messages to stdout using the
GRASS C-library messaging system.
"""
global message_interface
return message_interface
###############################################################################
# The global variable that stores the C-library interface object that
# provides a fast and exit safe interface to the C-library libgis,
# libraster, libraster3d and libvector functions
c_library_interface = None
def _init_tgis_c_library_interface():
"""Set the global C-library interface variable that
provides a fast and exit safe interface to the C-library libgis,
libraster, libraster3d and libvector functions
"""
global c_library_interface
if c_library_interface is None:
c_library_interface = CLibrariesInterface()
def get_tgis_c_library_interface():
"""Return the C-library interface that
provides a fast and exit safe interface to the C-library libgis,
libraster, libraster3d and libvector functions
"""
global c_library_interface
return c_library_interface
###############################################################################
# Set this variable True to raise a FatalError exception
# in case a fatal error occurs using the messenger interface
raise_on_error = False
def set_raise_on_error(raise_exp=True):
"""Define behavior on fatal error, invoked using the tgis messenger
interface (msgr.fatal())
The messenger interface will be restarted using the new error policy
:param raise_exp: True to raise a FatalError exception instead of calling
sys.exit(1) when using the tgis messenger interface
.. code-block:: python
>>> import grass.temporal as tgis
>>> tgis.init()
>>> ignore = tgis.set_raise_on_error(False)
>>> msgr = tgis.get_tgis_message_interface()
>>> tgis.get_raise_on_error()
False
>>> msgr.fatal("Ohh no no no!")
Traceback (most recent call last):
File "__init__.py", line 239, in fatal
sys.exit(1)
SystemExit: 1
>>> tgis.set_raise_on_error(True)
False
>>> msgr.fatal("Ohh no no no!")
Traceback (most recent call last):
File "__init__.py", line 241, in fatal
raise FatalError(message)
FatalError: Ohh no no no!
:returns: current status
"""
global raise_on_error
tmp_raise = raise_on_error
raise_on_error = raise_exp
global message_interface
if message_interface:
message_interface.set_raise_on_error(raise_on_error)
else:
_init_tgis_message_interface(raise_on_error)
return tmp_raise
def get_raise_on_error():
"""Return True if a FatalError exception is raised instead of calling
sys.exit(1) in case a fatal error was invoked with msgr.fatal()
"""
global raise_on_error
return raise_on_error
###############################################################################
def get_tgis_version():
"""Get the version number of the temporal framework
    :returns: The version number of the temporal framework as integer
"""
global tgis_version
return tgis_version
###############################################################################
def get_tgis_db_version():
"""Get the version number of the temporal framework
:returns: The version number of the temporal framework as string
"""
global tgis_db_version
return tgis_db_version
###############################################################################
def get_tgis_metadata(dbif=None):
"""Return the tgis metadata table as a list of rows (dicts) or None if not
present
:param dbif: The database interface to be used
:returns: The selected rows with key/value columns or None
"""
dbif, connected = init_dbif(dbif)
# Select metadata if the table is present
try:
statement = "SELECT * FROM tgis_metadata;\n"
dbif.execute(statement)
rows = dbif.fetchall()
except:
rows = None
if connected:
dbif.close()
return rows
###############################################################################
# The temporal database string set with t.connect
# with substituted GRASS variables gisdbase, location and mapset
tgis_database_string = None
def get_tgis_database_string():
"""Return the preprocessed temporal database string
This string is the temporal database string set with t.connect
    that was processed to substitute location, gisdbase and mapset
variables.
"""
global tgis_database_string
return tgis_database_string
###############################################################################
def get_sql_template_path():
base = os.getenv("GISBASE")
base_etc = os.path.join(base, "etc")
return os.path.join(base_etc, "sql")
###############################################################################
def stop_subprocesses():
"""Stop the messenger and C-interface subprocesses
that are started by tgis.init()
"""
global message_interface
global c_library_interface
if message_interface:
message_interface.stop()
if c_library_interface:
c_library_interface.stop()
# We register this function to be called at exit
atexit.register(stop_subprocesses)
def get_available_temporal_mapsets():
"""Return a list of of mapset names with temporal database driver and names
that are accessible from the current mapset.
:returns: A dictionary, mapset names are keys, the tuple (driver,
database) are the values
"""
global c_library_interface
global message_interface
mapsets = c_library_interface.available_mapsets()
tgis_mapsets = {}
for mapset in mapsets:
driver = c_library_interface.get_driver_name(mapset)
database = c_library_interface.get_database_name(mapset)
message_interface.debug(1, "get_available_temporal_mapsets: "\
"\n mapset %s\n driver %s\n database %s"%(mapset,
driver, database))
if driver and database:
# Check if the temporal sqlite database exists
            # We accept a non-existing database file if the mapset is the
            # current mapset, so that the database can be created in it
if (driver == "sqlite" and os.path.exists(database)) or mapset == get_current_mapset() :
tgis_mapsets[mapset] = (driver, database)
        # We need to warn if the connection is defined but the database
        # does not exist
if driver == "sqlite" and not os.path.exists(database):
message_interface.warning("Temporal database connection defined as:\n" + \
database + "\nBut database file does not exist.")
return tgis_mapsets
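# Illustrative shape of the value returned by get_available_temporal_mapsets()
# (the path is hypothetical):
#
#   {"PERMANENT": ("sqlite", "/grassdata/location/PERMANENT/tgis/sqlite.db")}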
###############################################################################
def init(raise_fatal_error=False):
"""This function set the correct database backend from GRASS environmental
variables and creates the grass temporal database structure for raster,
vector and raster3d maps as well as for the space-time datasets strds,
str3ds and stvds in case it does not exist.
Several global variables are initiated and the messenger and C-library
interface subprocesses are spawned.
Re-run this function in case the following GRASS variables change while
the process runs:
- MAPSET
- LOCATION_NAME
- GISDBASE
- TGIS_DISABLE_MAPSET_CHECK
- TGIS_DISABLE_TIMESTAMP_WRITE
Re-run this function if the following t.connect variables change while
the process runs:
- temporal GIS driver (set by t.connect driver=)
- temporal GIS database (set by t.connect database=)
The following environmental variables are checked:
- GRASS_TGIS_PROFILE (True, False, 1, 0)
- GRASS_TGIS_RAISE_ON_ERROR (True, False, 1, 0)
    .. warning::
        This function must be called before any spatio-temporal processing
        can be started
:param raise_fatal_error: Set this True to assure that the init()
function does not kill a persistent process
like the GUI. If set True a
grass.pygrass.messages.FatalError
exception will be raised in case a fatal
error occurs in the init process, otherwise
sys.exit(1) will be called.
"""
# We need to set the correct database backend and several global variables
# from the GRASS mapset specific environment variables of g.gisenv and t.connect
global tgis_backend
global tgis_database
global tgis_database_string
global tgis_dbmi_paramstyle
global raise_on_error
global enable_mapset_check
global enable_timestamp_write
global current_mapset
global current_location
global current_gisdbase
raise_on_error = raise_fatal_error
# We must run t.connect at first to create the temporal database and to
# get the environmental variables
gscript.run_command("t.connect", flags="c")
grassenv = gscript.gisenv()
# Set the global variable for faster access
current_mapset = grassenv["MAPSET"]
current_location = grassenv["LOCATION_NAME"]
current_gisdbase = grassenv["GISDBASE"]
# Check environment variable GRASS_TGIS_RAISE_ON_ERROR
if os.getenv("GRASS_TGIS_RAISE_ON_ERROR") == "True" or \
os.getenv("GRASS_TGIS_RAISE_ON_ERROR") == "1":
raise_on_error = True
# Check if the script library raises on error,
# if so we do the same
if gscript.get_raise_on_error() is True:
raise_on_error = True
# Start the GRASS message interface server
_init_tgis_message_interface(raise_on_error)
# Start the C-library interface server
_init_tgis_c_library_interface()
msgr = get_tgis_message_interface()
msgr.debug(1, "Initiate the temporal database")
#"\n traceback:%s"%(str(" \n".join(traceback.format_stack()))))
msgr.debug(1, ("Raise on error id: %s"%str(raise_on_error)))
ciface = get_tgis_c_library_interface()
driver_string = ciface.get_driver_name()
database_string = ciface.get_database_name()
# Set the mapset check and the timestamp write
if "TGIS_DISABLE_MAPSET_CHECK" in grassenv:
if gscript.encode(grassenv["TGIS_DISABLE_MAPSET_CHECK"]) == "True" or \
gscript.encode(grassenv["TGIS_DISABLE_MAPSET_CHECK"]) == "1":
enable_mapset_check = False
msgr.warning("TGIS_DISABLE_MAPSET_CHECK is True")
if "TGIS_DISABLE_TIMESTAMP_WRITE" in grassenv:
if gscript.encode(grassenv["TGIS_DISABLE_TIMESTAMP_WRITE"]) == "True" or \
gscript.encode(grassenv["TGIS_DISABLE_TIMESTAMP_WRITE"]) == "1":
enable_timestamp_write = False
msgr.warning("TGIS_DISABLE_TIMESTAMP_WRITE is True")
if driver_string is not None and driver_string != "":
driver_string = decode(driver_string)
if driver_string == "sqlite":
tgis_backend = driver_string
try:
import sqlite3
except ImportError:
msgr.error("Unable to locate the sqlite SQL Python interface"
" module sqlite3.")
raise
dbmi = sqlite3
elif driver_string == "pg":
tgis_backend = driver_string
try:
import psycopg2
except ImportError:
msgr.error("Unable to locate the Postgresql SQL Python "
"interface module psycopg2.")
raise
dbmi = psycopg2
else:
msgr.fatal(_("Unable to initialize the temporal DBMI interface. "
"Please use t.connect to specify the driver and the"
" database string"))
else:
# Set the default sqlite3 connection in case nothing was defined
gscript.run_command("t.connect", flags="d")
driver_string = ciface.get_driver_name()
database_string = ciface.get_database_name()
tgis_backend = driver_string
try:
import sqlite3
except ImportError:
msgr.error("Unable to locate the sqlite SQL Python interface"
" module sqlite3.")
raise
dbmi = sqlite3
tgis_database_string = database_string
# Set the parameter style
tgis_dbmi_paramstyle = dbmi.paramstyle
# We do not know if the database already exists
db_exists = False
dbif = SQLDatabaseInterfaceConnection()
# Check if the database already exists
if tgis_backend == "sqlite":
# Check path of the sqlite database
if os.path.exists(tgis_database_string):
dbif.connect()
# Check for raster_base table
dbif.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='raster_base';")
name = dbif.fetchone()
if name and name[0] == "raster_base":
db_exists = True
dbif.close()
elif tgis_backend == "pg":
# Connect to database
dbif.connect()
# Check for raster_base table
dbif.execute("SELECT EXISTS(SELECT * FROM information_schema.tables "
"WHERE table_name=%s)", ('raster_base',))
if dbif.fetchone()[0]:
db_exists = True
backup_howto = "The format of your actual temporal database is not " \
"supported any more.\nSolution: You need to export it by " \
"restoring the GRASS GIS version used for creating this DB"\
". From there, create a backup of your temporal database "\
"to avoid the loss of your temporal data.\nNotes: Use " \
"t.rast.export and t.vect.export to make a backup of your" \
" existing space time datasets.To safe the timestamps of" \
" your existing maps and space time datasets, use " \
"t.rast.list, t.vect.list and t.rast3d.list. "\
"You can register the existing time stamped maps easily if"\
" you export columns=id,start_time,end_time into text "\
"files and use t.register to register them again in new" \
" created space time datasets (t.create). After the backup"\
" remove the existing temporal database, a new one will be"\
" created automatically.\n"
if db_exists is True:
# Check the version of the temporal database
dbif.close()
dbif.connect()
metadata = get_tgis_metadata(dbif)
dbif.close()
if metadata is None:
msgr.fatal(_("Unable to receive temporal database metadata.\n"
"Current temporal database info:%(info)s") % (
{"info": get_database_info_string()}))
for entry in metadata:
if "tgis_version" in entry and entry[1] != str(get_tgis_version()):
msgr.fatal(_("Unsupported temporal database: version mismatch."
"\n %(backup)s Supported temporal API version is:"
" %(api)i.\nPlease update your GRASS GIS "
"installation.\nCurrent temporal database info:"
"%(info)s") % ({"backup": backup_howto,
"api": get_tgis_version(),
"info": get_database_info_string()}))
if "tgis_db_version" in entry and entry[1] != str(get_tgis_db_version()):
msgr.fatal(_("Unsupported temporal database: version mismatch."
"\n %(backup)sSupported temporal database version"
" is: %(tdb)i\nCurrent temporal database info:"
"%(info)s") % ({"backup": backup_howto,
"tdb": get_tgis_version(),
"info": get_database_info_string()}))
return
create_temporal_database(dbif)
###############################################################################
def get_database_info_string():
dbif = SQLDatabaseInterfaceConnection()
info = "\nDBMI interface:..... " + str(dbif.get_dbmi().__name__)
info += "\nTemporal database:.. " + str(get_tgis_database_string())
return info
###############################################################################
def create_temporal_database(dbif):
"""This function will create the temporal database
It will create all tables and triggers that are needed to run
the temporal GIS
:param dbif: The database interface to be used
"""
global tgis_backend
global tgis_version
global tgis_db_version
global tgis_database_string
template_path = get_sql_template_path()
msgr = get_tgis_message_interface()
# Read all SQL scripts and templates
map_tables_template_sql = open(os.path.join(
template_path, "map_tables_template.sql"), 'r').read()
raster_metadata_sql = open(os.path.join(
get_sql_template_path(), "raster_metadata_table.sql"), 'r').read()
raster3d_metadata_sql = open(os.path.join(template_path,
"raster3d_metadata_table.sql"),
'r').read()
vector_metadata_sql = open(os.path.join(template_path,
"vector_metadata_table.sql"),
'r').read()
raster_views_sql = open(os.path.join(template_path, "raster_views.sql"),
'r').read()
raster3d_views_sql = open(os.path.join(template_path,
"raster3d_views.sql"), 'r').read()
vector_views_sql = open(os.path.join(template_path, "vector_views.sql"),
'r').read()
stds_tables_template_sql = open(os.path.join(template_path,
"stds_tables_template.sql"),
'r').read()
strds_metadata_sql = open(os.path.join(template_path,
"strds_metadata_table.sql"),
'r').read()
str3ds_metadata_sql = open(os.path.join(template_path,
"str3ds_metadata_table.sql"),
'r').read()
stvds_metadata_sql = open(os.path.join(template_path,
"stvds_metadata_table.sql"),
'r').read()
strds_views_sql = open(os.path.join(template_path, "strds_views.sql"),
'r').read()
str3ds_views_sql = open(os.path.join(template_path, "str3ds_views.sql"),
'r').read()
stvds_views_sql = open(os.path.join(template_path, "stvds_views.sql"),
'r').read()
# Create the raster, raster3d and vector tables SQL statements
raster_tables_sql = map_tables_template_sql.replace("GRASS_MAP", "raster")
vector_tables_sql = map_tables_template_sql.replace("GRASS_MAP", "vector")
raster3d_tables_sql = map_tables_template_sql.replace(
"GRASS_MAP", "raster3d")
# Create the space-time raster, raster3d and vector dataset tables
# SQL statements
strds_tables_sql = stds_tables_template_sql.replace("STDS", "strds")
stvds_tables_sql = stds_tables_template_sql.replace("STDS", "stvds")
str3ds_tables_sql = stds_tables_template_sql.replace("STDS", "str3ds")
msgr.message(_("Creating temporal database: %s" % (str(tgis_database_string))))
if tgis_backend == "sqlite":
# We need to create the sqlite3 database path if it does not exist
tgis_dir = os.path.dirname(tgis_database_string)
if not os.path.exists(tgis_dir):
try:
os.makedirs(tgis_dir)
except Exception as e:
msgr.fatal(_("Unable to create SQLite temporal database\n"
"Exception: %s\nPlease use t.connect to set a "
"read- and writable temporal database path" % (e)))
# Set up the trigger that takes care of
# the correct deletion of entries across the different tables
delete_trigger_sql = open(os.path.join(template_path,
"sqlite3_delete_trigger.sql"),
'r').read()
indexes_sql = open(os.path.join(template_path, "sqlite3_indexes.sql"),
'r').read()
else:
# Set up the trigger that takes care of
# the correct deletion of entries across the different tables
delete_trigger_sql = open(os.path.join(template_path,
"postgresql_delete_trigger.sql"),
'r').read()
indexes_sql = open(os.path.join(template_path,
"postgresql_indexes.sql"), 'r').read()
# Connect now to the database
if dbif.connected is not True:
dbif.connect()
# Execute the SQL statements for sqlite
# Create the global tables for the native grass datatypes
dbif.execute_transaction(raster_tables_sql)
dbif.execute_transaction(raster_metadata_sql)
dbif.execute_transaction(raster_views_sql)
dbif.execute_transaction(vector_tables_sql)
dbif.execute_transaction(vector_metadata_sql)
dbif.execute_transaction(vector_views_sql)
dbif.execute_transaction(raster3d_tables_sql)
dbif.execute_transaction(raster3d_metadata_sql)
dbif.execute_transaction(raster3d_views_sql)
# Create the tables for the new space-time datatypes
dbif.execute_transaction(strds_tables_sql)
dbif.execute_transaction(strds_metadata_sql)
dbif.execute_transaction(strds_views_sql)
dbif.execute_transaction(stvds_tables_sql)
dbif.execute_transaction(stvds_metadata_sql)
dbif.execute_transaction(stvds_views_sql)
dbif.execute_transaction(str3ds_tables_sql)
dbif.execute_transaction(str3ds_metadata_sql)
dbif.execute_transaction(str3ds_views_sql)
# The delete trigger
dbif.execute_transaction(delete_trigger_sql)
# The indexes
dbif.execute_transaction(indexes_sql)
# Create the tgis metadata table to store the database
# initial configuration
# The metadata table content
metadata = {}
metadata["tgis_version"] = tgis_version
metadata["tgis_db_version"] = tgis_db_version
metadata["creation_time"] = datetime.today()
_create_tgis_metadata_table(metadata, dbif)
dbif.close()
###############################################################################
def _create_tgis_metadata_table(content, dbif=None):
"""!Create the temporal gis metadata table which stores all metadata
information about the temporal database.
:param content: The dictionary that stores the key:value metadata
that should be stored in the metadata table
:param dbif: The database interface to be used
"""
dbif, connected = init_dbif(dbif)
statement = "CREATE TABLE tgis_metadata (key VARCHAR NOT NULL, value VARCHAR);\n";
dbif.execute_transaction(statement)
for key in content.keys():
statement = "INSERT INTO tgis_metadata (key, value) VALUES " + \
"(\'%s\' , \'%s\');\n" % (str(key), str(content[key]))
dbif.execute_transaction(statement)
if connected:
dbif.close()
###############################################################################
class SQLDatabaseInterfaceConnection(object):
def __init__(self):
self.tgis_mapsets = get_available_temporal_mapsets()
self.current_mapset = get_current_mapset()
self.connections = {}
self.connected = False
self.unique_connections = {}
for mapset in self.tgis_mapsets.keys():
driver, dbstring = self.tgis_mapsets[mapset]
if dbstring not in self.unique_connections.keys():
self.unique_connections[dbstring] = DBConnection(backend=driver,
dbstring=dbstring)
self.connections[mapset] = self.unique_connections[dbstring]
self.msgr = get_tgis_message_interface()
def get_dbmi(self, mapset=None):
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
return self.connections[mapset].dbmi
def
|
(self, mapset=None):
"""
Roll back the last transaction. This must be called
        in case a new query should be performed after a database error.
        This is only relevant for the PostgreSQL backend.
        """
        if mapset is None:
            mapset = self.current_mapset
        mapset = decode(mapset)
        if mapset in self.tgis_mapsets.keys():
            self.connections[mapset].rollback()
def connect(self):
"""Connect to the DBMI to execute SQL statements
Supported backends are sqlite3 and postgresql
"""
for mapset in self.tgis_mapsets.keys():
driver, dbstring = self.tgis_mapsets[mapset]
conn = self.connections[mapset]
if conn.is_connected() is False:
conn.connect(dbstring)
self.connected = True
def is_connected(self):
return self.connected
def close(self):
"""Close the DBMI connection
There may be several temporal databases in a location, hence
close all temporal databases that have been opened.
"""
for key in self.unique_connections.keys():
self.unique_connections[key].close()
self.connected = False
def mogrify_sql_statement(self, content, mapset=None):
"""Return the SQL statement and arguments as executable SQL string
:param content: The content as tuple with two entries, the first
entry is the SQL statement with DBMI specific
place holder (?), the second entry is the argument
list that should substitute the place holder.
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
"""
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to mogrify sql statement. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].mogrify_sql_statement(content)
def check_table(self, table_name, mapset=None):
"""Check if a table exists in the temporal database
:param table_name: The name of the table to be checked for existence
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
:returns: True if the table exists, False otherwise
TODO:
There may be several temporal databases in a location, hence
the mapset is used to query the correct temporal database.
"""
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to check table. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].check_table(table_name)
def execute(self, statement, args=None, mapset=None):
"""
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
"""
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to execute sql statement. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].execute(statement, args)
def fetchone(self, mapset=None):
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to fetch one. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].fetchone()
def fetchall(self, mapset=None):
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to fetch all. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].fetchall()
def execute_transaction(self, statement, mapset=None):
"""Execute a transactional SQL statement
The BEGIN and END TRANSACTION statements will be added automatically
to the sql statement
:param statement: The executable SQL statement or SQL script
"""
if mapset is None:
mapset = self.current_mapset
mapset = decode(mapset)
if mapset not in self.tgis_mapsets.keys():
self.msgr.fatal(_("Unable to execute transaction. " +
self._create_mapset_error_message(mapset)))
return self.connections[mapset].execute_transaction(statement)
def _create_mapset_error_message(self, mapset):
return("You have no permission to "
"access mapset <%(mapset)s>, or "
"mapset <%(mapset)s> has no temporal database. "
"Accessible mapsets are: <%(mapsets)s>" % \
{"mapset": decode(mapset),
"mapsets":','.join(self.tgis_mapsets.keys())})
###############################################################################
class DBConnection(object):
"""This class represents the database interface connection
and provides access to the chosen backend modules.
The following DBMS are supported:
- sqlite via the sqlite3 standard library
- postgresql via psycopg2
"""
def __init__(self, backend=None, dbstring=None):
""" Constructor of a database connection
param backend:The database backend sqlite or pg
param dbstring: The database connection string
"""
self.connected = False
if backend is None:
global tgis_backend
if decode(tgis_backend) == "sqlite":
self.dbmi = sqlite3
else:
self.dbmi = psycopg2
else:
if decode(backend) == "sqlite":
self.dbmi = sqlite3
else:
self.dbmi = psycopg2
if dbstring is None:
global tgis_database_string
self.dbstring = tgis_database_string
else:
self.dbstring = dbstring
self.msgr = get_tgis_message_interface()
self.msgr.debug(1, "DBConnection constructor:"\
"\n backend: %s"\
"\n dbstring: %s"%(backend, self.dbstring))
#"\n traceback:%s"%(backend, self.dbstring,
#str(" \n".join(traceback.format_stack()))))
def __del__(self):
if self.connected is True:
self.close()
def is_connected(self):
return self.connected
def rollback(self):
"""
Roll back the last transaction. This must be called
in case a new query should be performed after a db error.
This is only relevant for the postgresql backend.
"""
if self.dbmi.__name__ == "psycopg2":
if self.connected:
self.connection.rollback()
def connect(self, dbstring=None):
"""Connect to the DBMI to execute SQL statements
Supported backends are sqlite3 and postgresql
:param dbstring: The database connection string
"""
# Connection in the current mapset
if dbstring is None:
dbstring = self.dbstring
dbstring = decode(dbstring)
try:
if self.dbmi.__name__ == "sqlite3":
self.connection = self.dbmi.connect(dbstring,
detect_types=self.dbmi.PARSE_DECLTYPES | self.dbmi.PARSE_COLNAMES)
self.connection.row_factory = self.dbmi.Row
self.connection.isolation_level = None
self.connection.text_factory = str
self.cursor = self.connection.cursor()
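# These PRAGMAs trade durability for speed: synchronous = OFF skips
# fsync on writes and journal_mode = MEMORY keeps the rollback journal
# in RAM, so a crash mid-transaction can corrupt the database file.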
self.cursor.execute("PRAGMA synchronous = OFF")
self.cursor.execute("PRAGMA journal_mode = MEMORY")
elif self.dbmi.__name__ == "psycopg2":
self.connection = self.dbmi.connect(dbstring)
#self.connection.set_isolation_level(dbmi.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.cursor = self.connection.cursor(
cursor_factory=self.dbmi.extras.DictCursor)
self.connected = True
except Exception as e:
self.msgr.fatal(_("Unable to connect to %(db)s database: "
"%(string)s\nException: \"%(ex)s\"\nPlease use"
" t.connect to set a read- and writable "
"temporal database backend") % (
{"db": self.dbmi.__name__,
"string": tgis_database_string, "ex": e, }))
def close(self):
"""Close the DBMI connection
TODO:
There may be several temporal databases in a location, hence
close all temporal databases that have been opened. Use a dictionary
to manage different connections.
"""
self.connection.commit()
self.cursor.close()
self.connected = False
def mogrify_sql_statement(self, content):
"""Return the SQL statement and arguments as executable SQL string
TODO:
Use the mapset argument to identify the correct database driver
:param content: The content as tuple with two entries, the first
entry is the SQL statement with DBMI specific
place holder (?), the second entry is the argument
list that should substitute the place holder.
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
Usage:
.. code-block:: python
>>> init()
>>> dbif = SQLDatabaseInterfaceConnection()
>>> dbif.mogrify_sql_statement(["SELECT ctime FROM raster_base WHERE id = ?",
... ["soil@PERMANENT",]])
"SELECT ctime FROM raster_base WHERE id = 'soil@PERMANENT'"
"""
sql = content[0]
args = content[1]
if self.dbmi.__name__ == "psycopg2":
if len(args) == 0:
return sql
else:
if self.connected:
try:
return self.cursor.mogrify(sql, args)
except Exception as exc:
print(sql, args)
raise exc
else:
self.connect()
statement = self.cursor.mogrify(sql, args)
self.close()
return statement
elif self.dbmi.__name__ == "sqlite3":
if len(args) == 0:
return sql
else:
# Unfortunately the sqlite3 module does not expose
# the transformation of sql strings with qmark or
# named arguments, so we have to get our hands dirty
# and substitute the placeholders ourselves. :(
# This opens the door to SQL injection because of the
# limited python sqlite3 implementation!!!
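# Illustrative example of the substitution performed below:
#   ("SELECT ctime FROM raster_base WHERE id = ?", ["soil@PERMANENT"])
# becomes
#   "SELECT ctime FROM raster_base WHERE id = 'soil@PERMANENT'"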
pos = 0
count = 0
maxcount = 100
statement = sql
while count < maxcount:
pos = statement.find("?", pos + 1)
if pos == -1:
break
if args[count] is None:
statement = "%sNULL%s" % (statement[0:pos],
statement[pos + 1:])
elif isinstance(args[count], int):
statement = "%s%d%s" % (statement[0:pos], args[count],
statement[pos + 1:])
elif isinstance(args[count], float):
statement = "%s%f%s" % (statement[0:pos], args[count],
statement[pos + 1:])
elif isinstance(args[count], datetime):
statement = "%s\'%s\'%s" % (statement[0:pos], str(args[count]),
statement[pos + 1:])
else:
# Default is a string, this works for datetime
# objects too
statement = "%s\'%s\'%s" % (statement[0:pos],
str(args[count]),
statement[pos + 1:])
count += 1
return statement
def check_table(self, table_name):
"""Check if a table exists in the temporal database
:param table_name: The name of the table to be checked for existence
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
:returns: True if the table exists, False otherwise
TODO:
There may be several temporal databases in a location, hence
the mapset is used to query the correct temporal database.
"""
table_exists = False
connected = False
if not self.connected:
self.connect()
connected = True
# Check if the database already exists
if self.dbmi.__name__ == "sqlite3":
self.cursor.execute("SELECT name FROM sqlite_master WHERE "
"type='table' AND name='%s';" % table_name)
name = self.cursor.fetchone()
if name and name[0] == table_name:
table_exists = True
else:
# Check for raster_base table
self.cursor.execute("SELECT EXISTS(SELECT * FROM information_schema.tables "
"WHERE table_name=%s)", ('%s' % table_name,))
if self.cursor.fetchone()[0]:
table_exists = True
if connected:
self.close()
return table_exists
def execute(self, statement, args=None):
"""Execute a SQL statement
:param statement: The executable SQL statement or SQL script
"""
connected = False
if not self.connected:
self.connect()
connected = True
try:
if args:
self.cursor.execute(statement, args)
else:
self.cursor.execute(statement)
except:
if connected:
self.close()
self.msgr.error(_("Unable to execute :\n %(sql)s" %
{"sql": statement}))
raise
if connected:
self.close()
def fetchone(self):
if self.connected:
return self.cursor.fetchone()
return None
def fetchall(self):
if self.connected:
return self.cursor.fetchall()
return None
def execute_transaction(self, statement, mapset=None):
"""Execute a transactional SQL statement
The BEGIN and END TRANSACTION statements will be added automatically
to the sql statement
:param statement: The executable SQL statement or SQL script
"""
connected = False
if not self.connected:
self.connect()
connected = True
sql_script = ""
sql_script += "BEGIN TRANSACTION;\n"
sql_script += statement
sql_script += "END TRANSACTION;"
try:
if self.dbmi.__name__ == "sqlite3":
self.cursor.executescript(sql_script)
else:
self.cursor.execute(sql_script)
self.connection.commit()
except:
if connected:
self.close()
self.msgr.error(_("Unable to execute transaction:\n %(sql)s" %
{"sql": statement}))
raise
if connected:
self.close()
###############################################################################
def init_dbif(dbif):
"""This method checks if the database interface connection exists,
if not a new one will be created, connected and True will be returned.
If the database interface exists but is connected, the connection will
be established.
:returns: the tuple (dbif, True|False)
Usage code sample:
.. code-block:: python
dbif, connect = tgis.init_dbif(None)
sql = dbif.mogrify_sql_statement(["SELECT * FROM raster_base WHERE id = ?",
["soil@PERMANENT"]])
dbif.execute_transaction(sql)
if connect:
dbif.close()
"""
if dbif is None:
dbif = SQLDatabaseInterfaceConnection()
dbif.connect()
return dbif, True
elif dbif.is_connected() is False:
dbif.connect()
return dbif, True
return dbif, False
###############################################################################
if __name__ == "__main__":
import doctest
doctest.testmod()
|
rollback
|
CustomEdge.tsx
|
import { FC } from 'react';
import { EdgeProps, getBezierPath, getMarkerEnd } from 'react-flow-renderer';
const CustomEdge: FC<EdgeProps> = ({
id,
sourceX,
sourceY,
targetX,
targetY,
sourcePosition,
targetPosition,
data,
arrowHeadType,
markerEndId,
}) => {
const edgePath = getBezierPath({ sourceX, sourceY, sourcePosition, targetX, targetY, targetPosition });
const markerEnd = getMarkerEnd(arrowHeadType, markerEndId);
return (
<>
<path id={id} className="react-flow__edge-path" d={edgePath} markerEnd={markerEnd} />
<text>
<textPath href={`#${id}`} style={{ fontSize: '12px' }} startOffset="50%" textAnchor="middle">
{data.text}
</textPath>
</text>
</>
);
|
};
export default CustomEdge;
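// Usage sketch (assuming the react-flow-renderer edgeTypes API):
// render <ReactFlow elements={elements} edgeTypes={{ custom: CustomEdge }} />
// and give each edge { id, source, target, type: 'custom', data: { text: '...' } }.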
| |
api.go
|
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vtctld
import (
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
log "github.com/golang/glog"
"golang.org/x/net/context"
"github.com/youtube/vitess/go/acl"
"github.com/youtube/vitess/go/vt/logutil"
"github.com/youtube/vitess/go/vt/schemamanager"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/topo/topoproto"
"github.com/youtube/vitess/go/vt/vtctl"
"github.com/youtube/vitess/go/vt/vttablet/tmclient"
"github.com/youtube/vitess/go/vt/workflow"
"github.com/youtube/vitess/go/vt/wrangler"
logutilpb "github.com/youtube/vitess/go/vt/proto/logutil"
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
)
var (
localCell = flag.String("cell", "", "cell to use")
showTopologyCRUD = flag.Bool("vtctld_show_topology_crud", true, "Controls the display of the CRUD topology actions in the vtctld UI.")
)
// This file implements a REST-style API for the vtctld web interface.
const (
apiPrefix = "/api/"
jsonContentType = "application/json; charset=utf-8"
)
func httpErrorf(w http.ResponseWriter, r *http.Request, format string, args ...interface{}) {
errMsg := fmt.Sprintf(format, args...)
log.Errorf("HTTP error on %v: %v, request: %#v", r.URL.Path, errMsg, r)
http.Error(w, errMsg, http.StatusInternalServerError)
}
func handleAPI(apiPath string, handlerFunc func(w http.ResponseWriter, r *http.Request) error) {
http.HandleFunc(apiPrefix+apiPath, func(w http.ResponseWriter, r *http.Request) {
defer func() {
if x := recover(); x != nil {
httpErrorf(w, r, "uncaught panic: %v", x)
}
}()
if err := handlerFunc(w, r); err != nil {
httpErrorf(w, r, "%v", err)
}
})
}
func handleCollection(collection string, getFunc func(*http.Request) (interface{}, error))
|
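// getItemPath returns the item sub-path of a collection URL:
// e.g. "/api/keyspaces/ks1" -> "ks1". It returns "" for bare
// collection paths such as "/api/cells".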
func getItemPath(url string) string {
// Strip API prefix.
if !strings.HasPrefix(url, apiPrefix) {
return ""
}
url = url[len(apiPrefix):]
// Strip collection name.
parts := strings.SplitN(url, "/", 2)
if len(parts) != 2 {
return ""
}
return parts[1]
}
func unmarshalRequest(r *http.Request, v interface{}) error {
data, err := ioutil.ReadAll(r.Body)
if err != nil {
return err
}
return json.Unmarshal(data, v)
}
func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository, realtimeStats *realtimeStats) {
tabletHealthCache := newTabletHealthCache(ts)
tmClient := tmclient.NewTabletManagerClient()
// Cells
handleCollection("cells", func(r *http.Request) (interface{}, error) {
if getItemPath(r.URL.Path) != "" {
return nil, errors.New("cells can only be listed, not retrieved")
}
return ts.GetKnownCells(ctx)
})
// Keyspaces
handleCollection("keyspaces", func(r *http.Request) (interface{}, error) {
keyspace := getItemPath(r.URL.Path)
switch r.Method {
case "GET":
// List all keyspaces.
if keyspace == "" {
return ts.GetKeyspaces(ctx)
}
// Get the keyspace record.
k, err := ts.GetKeyspace(ctx, keyspace)
if err != nil {
return nil, err
}
// Pass the embedded proto directly or jsonpb will panic.
return k.Keyspace, err
// Perform an action on a keyspace.
case "POST":
if keyspace == "" {
return nil, errors.New("A POST request needs a keyspace in the URL")
}
if err := r.ParseForm(); err != nil {
return nil, err
}
action := r.FormValue("action")
if action == "" {
return nil, errors.New("A POST request must specify action")
}
return actions.ApplyKeyspaceAction(ctx, action, keyspace, r), nil
default:
return nil, fmt.Errorf("unsupported HTTP method: %v", r.Method)
}
})
// Shards
handleCollection("shards", func(r *http.Request) (interface{}, error) {
shardPath := getItemPath(r.URL.Path)
if !strings.Contains(shardPath, "/") {
return nil, fmt.Errorf("invalid shard path: %q", shardPath)
}
parts := strings.SplitN(shardPath, "/", 2)
keyspace := parts[0]
shard := parts[1]
// List the shards in a keyspace.
if shard == "" {
return ts.GetShardNames(ctx, keyspace)
}
// Perform an action on a shard.
if r.Method == "POST" {
if err := r.ParseForm(); err != nil {
return nil, err
}
action := r.FormValue("action")
if action == "" {
return nil, errors.New("must specify action")
}
return actions.ApplyShardAction(ctx, action, keyspace, shard, r), nil
}
// Get the shard record.
si, err := ts.GetShard(ctx, keyspace, shard)
if err != nil {
return nil, err
}
// Pass the embedded proto directly or jsonpb will panic.
return si.Shard, err
})
// SrvKeyspace
handleCollection("srv_keyspace", func(r *http.Request) (interface{}, error) {
keyspacePath := getItemPath(r.URL.Path)
parts := strings.SplitN(keyspacePath, "/", 2)
// Request was incorrectly formatted.
if len(parts) != 2 {
return nil, fmt.Errorf("invalid srvkeyspace path: %q expected path: /srv_keyspace/<cell>/<keyspace>", keyspacePath)
}
cell := parts[0]
keyspace := parts[1]
if cell == "local" {
if *localCell == "" {
return nil, fmt.Errorf("local cell requested, but not specified. Please set with -cell flag")
}
cell = *localCell
}
// If a keyspace is provided then return the specified srvkeyspace.
if keyspace != "" {
srvKeyspace, err := ts.GetSrvKeyspace(ctx, cell, keyspace)
if err != nil {
return nil, fmt.Errorf("Can't get server keyspace: %v", err)
}
return srvKeyspace, nil
}
// Else return the srvKeyspace from all keyspaces.
srvKeyspaces := make(map[string]interface{})
keyspaceNamesList, err := ts.GetSrvKeyspaceNames(ctx, cell)
if err != nil {
return nil, fmt.Errorf("can't get list of SrvKeyspaceNames for cell %q: GetSrvKeyspaceNames returned: %v", cell, err)
}
for _, keyspaceName := range keyspaceNamesList {
srvKeyspace, err := ts.GetSrvKeyspace(ctx, cell, keyspaceName)
if err != nil {
// If a keyspace is in the process of being set up, it exists
// in the list of keyspaces but GetSrvKeyspace fails.
//
// Instead of returning this error, simply skip it in the
// loop so we still return the other valid keyspaces.
continue
}
srvKeyspaces[keyspaceName] = srvKeyspace
}
return srvKeyspaces, nil
})
// Tablets
handleCollection("tablets", func(r *http.Request) (interface{}, error) {
tabletPath := getItemPath(r.URL.Path)
// List tablets based on query params.
if tabletPath == "" {
if err := r.ParseForm(); err != nil {
return nil, err
}
shardRef := r.FormValue("shard")
cell := r.FormValue("cell")
if shardRef != "" {
// Look up by keyspace/shard, and optionally cell.
keyspace, shard, err := topoproto.ParseKeyspaceShard(shardRef)
if err != nil {
return nil, err
}
if cell != "" {
result, err := ts.FindAllTabletAliasesInShardByCell(ctx, keyspace, shard, []string{cell})
if err != nil && err != topo.ErrPartialResult {
return result, err
}
return result, nil
}
result, err := ts.FindAllTabletAliasesInShard(ctx, keyspace, shard)
if err != nil && err != topo.ErrPartialResult {
return result, err
}
return result, nil
}
// Get all tablets in a cell.
if cell == "" {
return nil, errors.New("cell param required")
}
return ts.GetTabletsByCell(ctx, cell)
}
// Get tablet health.
if parts := strings.Split(tabletPath, "/"); len(parts) == 2 && parts[1] == "health" {
tabletAlias, err := topoproto.ParseTabletAlias(parts[0])
if err != nil {
return nil, err
}
return tabletHealthCache.Get(ctx, tabletAlias)
}
tabletAlias, err := topoproto.ParseTabletAlias(tabletPath)
if err != nil {
return nil, err
}
// Perform an action on a tablet.
if r.Method == "POST" {
if err := r.ParseForm(); err != nil {
return nil, err
}
action := r.FormValue("action")
if action == "" {
return nil, errors.New("must specify action")
}
return actions.ApplyTabletAction(ctx, action, tabletAlias, r), nil
}
// Get the tablet record.
t, err := ts.GetTablet(ctx, tabletAlias)
if err != nil {
return nil, err
}
// Pass the embedded proto directly or jsonpb will panic.
return t.Tablet, err
})
// Healthcheck real time status per (cell, keyspace, tablet type, metric).
handleCollection("tablet_statuses", func(r *http.Request) (interface{}, error) {
targetPath := getItemPath(r.URL.Path)
// Get the heatmap data based on query parameters.
if targetPath == "" {
if err := r.ParseForm(); err != nil {
return nil, err
}
keyspace := r.FormValue("keyspace")
cell := r.FormValue("cell")
tabletType := r.FormValue("type")
_, err := topoproto.ParseTabletType(tabletType)
// Excluding the case where parse fails because all tabletTypes was chosen.
if err != nil && tabletType != "all" {
return nil, fmt.Errorf("invalid tablet type: %v ", err)
}
metric := r.FormValue("metric")
// Setting default values if none was specified in the query params.
if keyspace == "" {
keyspace = "all"
}
if cell == "" {
cell = "all"
}
if tabletType == "" {
tabletType = "all"
}
if metric == "" {
metric = "health"
}
if realtimeStats == nil {
return nil, fmt.Errorf("realtimeStats not initialized")
}
heatmap, err := realtimeStats.heatmapData(keyspace, cell, tabletType, metric)
if err != nil {
return nil, fmt.Errorf("couldn't get heatmap data: %v", err)
}
return heatmap, nil
}
return nil, fmt.Errorf("invalid target path: %q expected path: ?keyspace=<keyspace>&cell=<cell>&type=<type>&metric=<metric>", targetPath)
})
handleCollection("tablet_health", func(r *http.Request) (interface{}, error) {
tabletPath := getItemPath(r.URL.Path)
parts := strings.SplitN(tabletPath, "/", 2)
// Request was incorrectly formatted.
if len(parts) != 2 {
return nil, fmt.Errorf("invalid tablet_health path: %q expected path: /tablet_health/<cell>/<uid>", tabletPath)
}
if realtimeStats == nil {
return nil, fmt.Errorf("realtimeStats not initialized")
}
cell := parts[0]
uidStr := parts[1]
uid, err := topoproto.ParseUID(uidStr)
if err != nil {
return nil, fmt.Errorf("incorrect uid: %v", err)
}
tabletAlias := topodatapb.TabletAlias{
Cell: cell,
Uid: uid,
}
tabletStat, err := realtimeStats.tabletStats(&tabletAlias)
if err != nil {
return nil, fmt.Errorf("could not get tabletStats: %v", err)
}
return tabletStat, nil
})
handleCollection("topology_info", func(r *http.Request) (interface{}, error) {
targetPath := getItemPath(r.URL.Path)
// Retrieving topology information (keyspaces, cells, and types) based on query params.
if targetPath == "" {
if err := r.ParseForm(); err != nil {
return nil, err
}
keyspace := r.FormValue("keyspace")
cell := r.FormValue("cell")
// Setting default values if none was specified in the query params.
if keyspace == "" {
keyspace = "all"
}
if cell == "" {
cell = "all"
}
if realtimeStats == nil {
return nil, fmt.Errorf("realtimeStats not initialized")
}
return realtimeStats.topologyInfo(keyspace, cell), nil
}
return nil, fmt.Errorf("invalid target path: %q expected path: ?keyspace=<keyspace>&cell=<cell>", targetPath)
})
// Vtctl Command
handleAPI("vtctl/", func(w http.ResponseWriter, r *http.Request) error {
if err := acl.CheckAccessHTTP(r, acl.ADMIN); err != nil {
http.Error(w, "403 Forbidden", http.StatusForbidden)
return nil
}
var args []string
resp := struct {
Error string
Output string
}{}
if err := unmarshalRequest(r, &args); err != nil {
return fmt.Errorf("can't unmarshal request: %v", err)
}
logstream := logutil.NewMemoryLogger()
wr := wrangler.New(logstream, ts, tmClient)
// TODO(enisoc): Context for run command should be request-scoped.
err := vtctl.RunCommand(ctx, wr, args)
if err != nil {
resp.Error = err.Error()
}
resp.Output = logstream.String()
data, err := json.MarshalIndent(resp, "", " ")
if err != nil {
return fmt.Errorf("json error: %v", err)
}
w.Header().Set("Content-Type", jsonContentType)
w.Write(data)
return nil
})
// Schema Change
handleAPI("schema/apply", func(w http.ResponseWriter, r *http.Request) error {
if err := acl.CheckAccessHTTP(r, acl.ADMIN); err != nil {
http.Error(w, "403 Forbidden", http.StatusForbidden)
return nil
}
req := struct {
Keyspace, SQL string
SlaveTimeoutSeconds int
}{}
if err := unmarshalRequest(r, &req); err != nil {
return fmt.Errorf("can't unmarshal request: %v", err)
}
if req.SlaveTimeoutSeconds <= 0 {
req.SlaveTimeoutSeconds = 10
}
logger := logutil.NewCallbackLogger(func(ev *logutilpb.Event) {
w.Write([]byte(logutil.EventString(ev)))
})
wr := wrangler.New(logger, ts, tmClient)
executor := schemamanager.NewTabletExecutor(
wr, time.Duration(req.SlaveTimeoutSeconds)*time.Second)
return schemamanager.Run(ctx,
schemamanager.NewUIController(req.SQL, req.Keyspace, w), executor)
})
// Features
handleAPI("features", func(w http.ResponseWriter, r *http.Request) error {
if err := acl.CheckAccessHTTP(r, acl.ADMIN); err != nil {
http.Error(w, "403 Forbidden", http.StatusForbidden)
return nil
}
resp := make(map[string]interface{})
resp["activeReparents"] = !*vtctl.DisableActiveReparents
resp["showStatus"] = *enableRealtimeStats
resp["showTopologyCRUD"] = *showTopologyCRUD
resp["showWorkflows"] = *workflowManagerInit
resp["workflows"] = workflow.AvailableFactories()
data, err := json.MarshalIndent(resp, "", " ")
if err != nil {
return fmt.Errorf("json error: %v", err)
}
w.Header().Set("Content-Type", jsonContentType)
w.Write(data)
return nil
})
}
|
{
handleAPI(collection+"/", func(w http.ResponseWriter, r *http.Request) error {
// Get the requested object.
obj, err := getFunc(r)
if err != nil {
if err == topo.ErrNoNode {
http.NotFound(w, r)
return nil
}
return fmt.Errorf("can't get %v: %v", collection, err)
}
// JSON encode response.
data, err := vtctl.MarshalJSON(obj)
if err != nil {
return fmt.Errorf("cannot marshal data: %v", err)
}
w.Header().Set("Content-Type", jsonContentType)
w.Write(data)
return nil
})
}
|
ibrd.rs
|
#[doc = "Reader of register IBRD"]
pub type R = crate::R<u32, super::IBRD>;
#[doc = "Writer for register IBRD"]
pub type W = crate::W<u32, super::IBRD>;
#[doc = "Register IBRD `reset()`'s with value 0"]
impl crate::ResetValue for super::IBRD {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
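// Note: for PL011-style UARTs the integer part of the baud divisor is
// typically UARTCLK / (16 * baud); DIVINT below holds that integer part.
// (The divisor formula is an assumption about the target UART, not
// something encoded in this generated register file.)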
#[doc = "Reader of field `DIVINT`"]
pub type DIVINT_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `DIVINT`"]
pub struct DIVINT_W<'a> {
|
w: &'a mut W,
}
impl<'a> DIVINT_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
self.w
}
}
impl R {
#[doc = "Bits 0:15 - These bits hold the baud integer divisor."]
#[inline(always)]
pub fn divint(&self) -> DIVINT_R {
DIVINT_R::new((self.bits & 0xffff) as u16)
}
}
impl W {
#[doc = "Bits 0:15 - These bits hold the baud integer divisor."]
#[inline(always)]
pub fn divint(&mut self) -> DIVINT_W {
DIVINT_W { w: self }
}
}
| |
proxmox.py
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _OnPrem
class _Proxmox(_OnPrem):
|
class Pve(_Proxmox):
_icon = "pve.png"
# Aliases
ProxmoxVE = Pve
|
_type = "proxmox"
_icon_dir = "resources/onprem/proxmox"
|
index.js
|
'use strict'
Object.defineProperty(exports, '__esModule', {
value: true
})
exports.default = addISOWeekYears
var _index = require('../_lib/toInteger/index.js')
var _index2 = _interopRequireDefault(_index)
var _index3 = require('../getISOWeekYear/index.js')
var _index4 = _interopRequireDefault(_index3)
var _index5 = require('../setISOWeekYear/index.js')
var _index6 = _interopRequireDefault(_index5)
function
|
(obj) {
return obj && obj.__esModule ? obj : { default: obj }
}
/**
* @name addISOWeekYears
* @category ISO Week-Numbering Year Helpers
* @summary Add the specified number of ISO week-numbering years to the given date.
*
* @description
* Add the specified number of ISO week-numbering years to the given date.
*
* ISO week-numbering year: http://en.wikipedia.org/wiki/ISO_week_date
*
*
* ### v2.0.0 breaking changes:
*
* - [Changes that are common for the whole library](https://github.com/date-fns/date-fns/blob/master/docs/upgradeGuide.md#Common-Changes).
*
* - The function was renamed from `addISOYears` to `addISOWeekYears`.
* "ISO week year" is short for [ISO week-numbering year](https://en.wikipedia.org/wiki/ISO_week_date).
* This change makes the name consistent with
* locale-dependent week-numbering year helpers, e.g., `addWeekYears`.
*
* @param {Date|String|Number} date - the date to be changed
* @param {Number} amount - the amount of ISO week-numbering years to be added
* @param {Options} [options] - the object with options. See [Options]{@link https://date-fns.org/docs/Options}
* @param {0|1|2} [options.additionalDigits=2] - passed to `toDate`. See [toDate]{@link https://date-fns.org/docs/toDate}
* @returns {Date} the new date with the ISO week-numbering years added
* @throws {TypeError} 2 arguments required
* @throws {RangeError} `options.additionalDigits` must be 0, 1 or 2
*
* @example
* // Add 5 ISO week-numbering years to 2 July 2010:
* var result = addISOWeekYears(new Date(2010, 6, 2), 5)
* //=> Fri Jun 26 2015 00:00:00
*/
function addISOWeekYears(dirtyDate, dirtyAmount, dirtyOptions) {
if (arguments.length < 2) {
throw new TypeError(
'2 arguments required, but only ' + arguments.length + ' present'
)
}
var amount = (0, _index2.default)(dirtyAmount)
return (0, _index6.default)(
dirtyDate,
(0, _index4.default)(dirtyDate, dirtyOptions) + amount,
dirtyOptions
)
}
module.exports = exports['default']
|
_interopRequireDefault
|
legacy_querier_test.go
|
package keeper
import (
"strings"
"testing"
core "github.com/terra-money/core/types"
"github.com/terra-money/core/x/treasury/types"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/staking"
)
const custom = "custom"
func getQueriedTaxRate(t *testing.T, ctx sdk.Context, cdc *codec.LegacyAmino, querier sdk.Querier, epoch int64) sdk.Dec {
query := abci.RequestQuery{
Path: strings.Join([]string{custom, types.QuerierRoute, types.QueryTaxRate}, "/"),
Data: nil,
}
bz, err := querier(ctx, []string{types.QueryTaxRate}, query)
require.Nil(t, err)
require.NotNil(t, bz)
var response sdk.Dec
err2 := cdc.UnmarshalJSON(bz, &response)
require.Nil(t, err2)
return response
}
func getQueriedTaxCap(t *testing.T, ctx sdk.Context, cdc *codec.LegacyAmino, querier sdk.Querier, denom string) sdk.Int {
params := types.QueryTaxCapParams{
Denom: denom,
}
bz, err := cdc.MarshalJSON(params)
require.NoError(t, err)
query := abci.RequestQuery{
Path: strings.Join([]string{custom, types.QuerierRoute, types.QueryTaxCap}, "/"),
Data: bz,
}
bz, err = querier(ctx, []string{types.QueryTaxCap}, query)
require.Nil(t, err)
require.NotNil(t, bz)
var response sdk.Int
err2 := cdc.UnmarshalJSON(bz, &response)
require.Nil(t, err2)
return response
}
func getQueriedTaxCaps(t *testing.T, ctx sdk.Context, cdc *codec.LegacyAmino, querier sdk.Querier) types.TaxCapsQueryResponse {
query := abci.RequestQuery{
Path: strings.Join([]string{custom, types.QuerierRoute, types.QueryTaxCaps}, "/"),
Data: nil,
}
bz, err := querier(ctx, []string{types.QueryTaxCaps}, query)
require.Nil(t, err)
require.NotNil(t, bz)
var response types.TaxCapsQueryResponse
err2 := cdc.UnmarshalJSON(bz, &response)
require.Nil(t, err2)
return response
}
func getQueriedRewardWeight(t *testing.T, ctx sdk.Context, cdc *codec.LegacyAmino, querier sdk.Querier, epoch int64) sdk.Dec {
query := abci.RequestQuery{
Path: strings.Join([]string{custom, types.QuerierRoute, types.QueryRewardWeight}, "/"),
Data: nil,
}
bz, err := querier(ctx, []string{types.QueryRewardWeight}, query)
require.Nil(t, err)
require.NotNil(t, bz)
var response sdk.Dec
err2 := cdc.UnmarshalJSON(bz, &response)
require.Nil(t, err2)
return response
}
func getQueriedTaxProceeds(t *testing.T, ctx sdk.Context, cdc *codec.LegacyAmino, querier sdk.Querier, epoch int64) sdk.Coins {
query := abci.RequestQuery{
Path: strings.Join([]string{custom, types.QuerierRoute, types.QueryTaxProceeds}, "/"),
Data: nil,
}
bz, err := querier(ctx, []string{types.QueryTaxProceeds}, query)
require.Nil(t, err)
require.NotNil(t, bz)
var response sdk.Coins
err2 := cdc.UnmarshalJSON(bz, &response)
require.Nil(t, err2)
return response
}
func getQueriedSeigniorageProceeds(t *testing.T, ctx sdk.Context, cdc *codec.LegacyAmino, querier sdk.Querier, epoch int64) sdk.Int {
query := abci.RequestQuery{
Path: strings.Join([]string{custom, types.QuerierRoute, types.QuerySeigniorageProceeds}, "/"),
Data: nil,
}
bz, err := querier(ctx, []string{types.QuerySeigniorageProceeds}, query)
require.Nil(t, err)
require.NotNil(t, bz)
var response sdk.Int
err2 := cdc.UnmarshalJSON(bz, &response)
require.Nil(t, err2)
return response
}
func getQueriedParameters(t *testing.T, ctx sdk.Context, cdc *codec.LegacyAmino, querier sdk.Querier) types.Params {
query := abci.RequestQuery{
Path: strings.Join([]string{custom, types.QuerierRoute, types.QueryParameters}, "/"),
Data: []byte{},
}
bz, err := querier(ctx, []string{types.QueryParameters}, query)
require.Nil(t, err)
require.NotNil(t, bz)
var params types.Params
err2 := cdc.UnmarshalJSON(bz, &params)
require.Nil(t, err2)
return params
}
func getQueriedIndicators(t *testing.T, ctx sdk.Context, cdc *codec.LegacyAmino, querier sdk.Querier) types.IndicatorQueryResponse {
query := abci.RequestQuery{
Path: strings.Join([]string{custom, types.QuerierRoute, types.QueryIndicators}, "/"),
Data: []byte{},
}
bz, err := querier(ctx, []string{types.QueryIndicators}, query)
require.Nil(t, err)
require.NotNil(t, bz)
var indicators types.IndicatorQueryResponse
err2 := cdc.UnmarshalJSON(bz, &indicators)
require.Nil(t, err2)
return indicators
}
func TestLegacyQueryParams(t *testing.T) {
input := CreateTestInput(t)
querier := NewLegacyQuerier(input.TreasuryKeeper, input.Cdc)
params := types.DefaultParams()
input.TreasuryKeeper.SetParams(input.Ctx, params)
queriedParams := getQueriedParameters(t, input.Ctx, input.Cdc, querier)
require.Equal(t, queriedParams, params)
}
func TestLegacyQueryRewardWeight(t *testing.T) {
input := CreateTestInput(t)
querier := NewLegacyQuerier(input.TreasuryKeeper, input.Cdc)
rewardWeight := sdk.NewDecWithPrec(77, 2)
input.TreasuryKeeper.SetRewardWeight(input.Ctx, rewardWeight)
queriedRewardWeight := getQueriedRewardWeight(t, input.Ctx, input.Cdc, querier, input.TreasuryKeeper.GetEpoch(input.Ctx))
require.Equal(t, queriedRewardWeight, rewardWeight)
}
func TestLegacyQueryTaxRate(t *testing.T) {
input := CreateTestInput(t)
querier := NewLegacyQuerier(input.TreasuryKeeper, input.Cdc)
taxRate := sdk.NewDecWithPrec(1, 3)
input.TreasuryKeeper.SetTaxRate(input.Ctx, taxRate)
queriedTaxRate := getQueriedTaxRate(t, input.Ctx, input.Cdc, querier, input.TreasuryKeeper.GetEpoch(input.Ctx))
require.Equal(t, queriedTaxRate, taxRate)
}
func TestLegacyQueryTaxCap(t *testing.T) {
input := CreateTestInput(t)
querier := NewLegacyQuerier(input.TreasuryKeeper, input.Cdc)
params := input.TreasuryKeeper.GetParams(input.Ctx)
// Query the tax cap for a random denom; it should fall back to the tax policy cap.
queriedTaxCap := getQueriedTaxCap(t, input.Ctx, input.Cdc, querier, "hello")
require.Equal(t, queriedTaxCap, params.TaxPolicy.Cap.Amount)
}
func
|
(t *testing.T) {
input := CreateTestInput(t)
querier := NewLegacyQuerier(input.TreasuryKeeper, input.Cdc)
input.TreasuryKeeper.SetTaxCap(input.Ctx, "ukrw", sdk.NewInt(1000000000))
input.TreasuryKeeper.SetTaxCap(input.Ctx, "usdr", sdk.NewInt(1000000))
input.TreasuryKeeper.SetTaxCap(input.Ctx, "uusd", sdk.NewInt(1200000))
// Query all tax caps that were registered above.
queriedTaxCaps := getQueriedTaxCaps(t, input.Ctx, input.Cdc, querier)
require.Equal(t, queriedTaxCaps,
types.TaxCapsQueryResponse{
{
Denom: "ukrw",
TaxCap: sdk.NewInt(1000000000),
},
{
Denom: "usdr",
TaxCap: sdk.NewInt(1000000),
},
{
Denom: "uusd",
TaxCap: sdk.NewInt(1200000),
},
},
)
}
func TestLegacyQueryTaxProceeds(t *testing.T) {
input := CreateTestInput(t)
querier := NewLegacyQuerier(input.TreasuryKeeper, input.Cdc)
taxProceeds := sdk.Coins{
sdk.NewCoin(core.MicroSDRDenom, sdk.NewInt(1000).MulRaw(core.MicroUnit)),
}
input.TreasuryKeeper.RecordEpochTaxProceeds(input.Ctx, taxProceeds)
queriedTaxProceeds := getQueriedTaxProceeds(t, input.Ctx, input.Cdc, querier, input.TreasuryKeeper.GetEpoch(input.Ctx))
require.Equal(t, queriedTaxProceeds, taxProceeds)
}
func TestLegacyQuerySeigniorageProceeds(t *testing.T) {
input := CreateTestInput(t)
querier := NewLegacyQuerier(input.TreasuryKeeper, input.Cdc)
targetSeigniorage := sdk.NewInt(10)
input.TreasuryKeeper.RecordEpochInitialIssuance(input.Ctx)
input.Ctx = input.Ctx.WithBlockHeight(int64(core.BlocksPerWeek))
input.BankKeeper.BurnCoins(input.Ctx, faucetAccountName, sdk.NewCoins(sdk.NewCoin(core.MicroLunaDenom, targetSeigniorage)))
queriedSeigniorageProceeds := getQueriedSeigniorageProceeds(t, input.Ctx, input.Cdc, querier, input.TreasuryKeeper.GetEpoch(input.Ctx))
require.Equal(t, targetSeigniorage, queriedSeigniorageProceeds)
}
func TestLegacyQueryIndicators(t *testing.T) {
input := CreateTestInput(t)
querier := NewLegacyQuerier(input.TreasuryKeeper, input.Cdc)
sh := staking.NewHandler(input.StakingKeeper)
stakingAmt := sdk.TokensFromConsensusPower(1, sdk.DefaultPowerReduction)
addr, val := ValAddrs[0], ValPubKeys[0]
addr1, val1 := ValAddrs[1], ValPubKeys[1]
_, err := sh(input.Ctx, NewTestMsgCreateValidator(addr, val, stakingAmt))
require.NoError(t, err)
_, err = sh(input.Ctx, NewTestMsgCreateValidator(addr1, val1, stakingAmt))
require.NoError(t, err)
staking.EndBlocker(input.Ctx.WithBlockHeight(int64(core.BlocksPerWeek)-1), input.StakingKeeper)
proceedsAmt := sdk.NewInt(1000000000000)
taxProceeds := sdk.NewCoins(sdk.NewCoin(core.MicroSDRDenom, proceedsAmt))
input.TreasuryKeeper.RecordEpochTaxProceeds(input.Ctx, taxProceeds)
targetIndicators := types.IndicatorQueryResponse{
TRLYear: proceedsAmt.ToDec().QuoInt(stakingAmt.MulRaw(2)),
TRLMonth: proceedsAmt.ToDec().QuoInt(stakingAmt.MulRaw(2)),
}
queriedIndicators := getQueriedIndicators(t, input.Ctx, input.Cdc, querier)
require.Equal(t, targetIndicators, queriedIndicators)
// Update indicators
input.TreasuryKeeper.UpdateIndicators(input.Ctx)
// Record same tax proceeds to get same trl
input.TreasuryKeeper.RecordEpochTaxProceeds(input.Ctx, taxProceeds)
// Change context to next epoch
input.Ctx = input.Ctx.WithBlockHeight(int64(core.BlocksPerWeek))
queriedIndicators = getQueriedIndicators(t, input.Ctx, input.Cdc, querier)
require.Equal(t, targetIndicators, queriedIndicators)
}
|
TestLegacyQueryTaxCaps
|
main.go
|
package main
import (
"context"
"errors"
"fmt"
"io"
"os"
"github.com/werf/werf/pkg/docker"
"github.com/werf/werf/pkg/util"
"github.com/werf/werf/pkg/buildah"
"github.com/werf/werf/pkg/werf"
)
var errUsage = errors.New("./buildah-test {auto|native-rootless|docker-with-fuse} DOCKERFILE_PATH [CONTEXT_PATH]")
func do(ctx context.Context) error {
var mode buildah.Mode
if v := os.Getenv("BUILDAH_TEST_MODE"); v != "" {
mode = buildah.Mode(v)
} else {
if len(os.Args) < 2 {
return errUsage
}
mode = buildah.ResolveMode(buildah.Mode(os.Args[1]))
os.Setenv("BUILDAH_TEST_MODE", string(mode))
}
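// ProcessStartupHook may re-exec the current binary (for example to
// set up a user namespace in native-rootless mode, which is why the
// chosen mode is stashed in BUILDAH_TEST_MODE above); when it reports
// shouldTerminate, this invocation must exit without doing any work.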
shouldTerminate, err := buildah.ProcessStartupHook(mode)
if err != nil {
return fmt.Errorf("buildah process startup hook failed: %s", err)
}
if shouldTerminate {
return nil
}
if err := werf.Init("", ""); err != nil {
return fmt.Errorf("unable to init werf subsystem: %s", err)
}
mode = buildah.ResolveMode(mode)
fmt.Printf("Using buildah mode: %s\n", mode)
if mode == buildah.ModeDockerWithFuse {
if err := docker.Init(ctx, "", false, false, ""); err != nil {
return err
|
}
if len(os.Args) < 3 {
return errUsage
}
var dockerfilePath = os.Args[2]
var contextDir string
if len(os.Args) > 3 {
contextDir = os.Args[3]
}
b, err := buildah.NewBuildah(mode, buildah.BuildahOpts{})
if err != nil {
return fmt.Errorf("unable to create buildah client: %s", err)
}
dockerfileData, err := os.ReadFile(dockerfilePath)
if err != nil {
return fmt.Errorf("error reading %q: %s", dockerfilePath, err)
}
var contextTar io.Reader
if contextDir != "" {
contextTar = util.ReadDirAsTar(contextDir)
}
imageId, err := b.BuildFromDockerfile(ctx, dockerfileData, buildah.BuildFromDockerfileOpts{
ContextTar: contextTar,
CommonOpts: buildah.CommonOpts{
LogWriter: os.Stdout,
},
})
if err != nil {
return fmt.Errorf("BuildFromDockerfile failed: %s", err)
}
fmt.Fprintf(os.Stdout, "INFO: built imageId is %s\n", imageId)
return nil
}
func main() {
if err := do(context.Background()); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
os.Exit(1)
}
}
|
}
|
NodeGraph.py
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
class NodeGraph( GafferUI.EditorWidget ) :
def __init__( self, scriptNode, **kw ) :
self.__gadgetWidget = GafferUI.GadgetWidget(
bufferOptions = set( [
GafferUI.GLWidget.BufferOptions.Double,
] ),
)
GafferUI.EditorWidget.__init__( self, self.__gadgetWidget, scriptNode, **kw )
graphGadget = GafferUI.GraphGadget( self.scriptNode() )
self.__rootChangedConnection = graphGadget.rootChangedSignal().connect( Gaffer.WeakMethod( self.__rootChanged ) )
self.__gadgetWidget.getViewportGadget().setPrimaryChild( graphGadget )
self.__gadgetWidget.getViewportGadget().setDragTracking( True )
self.__frame( scriptNode.selection() )
self.__buttonPressConnection = self.__gadgetWidget.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ) )
self.__keyPressConnection = self.__gadgetWidget.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ) )
self.__buttonDoubleClickConnection = self.__gadgetWidget.buttonDoubleClickSignal().connect( Gaffer.WeakMethod( self.__buttonDoubleClick ) )
self.__dragEnterConnection = self.__gadgetWidget.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ) )
self.__dropConnection = self.__gadgetWidget.dropSignal().connect( Gaffer.WeakMethod( self.__drop ) )
self.__preRenderConnection = self.__gadgetWidget.getViewportGadget().preRenderSignal().connect( Gaffer.WeakMethod( self.__preRender ) )
self.__nodeMenu = None
## Returns the internal GadgetWidget holding the GraphGadget.
def graphGadgetWidget( self ) :
return self.__gadgetWidget
## Returns the internal Gadget used to draw the graph. This may be
# modified directly to set up appropriate filters etc. This is just
# a convenience method returning graphGadgetWidget().getViewportGadget().getPrimaryChild().
def graphGadget( self ) :
return self.graphGadgetWidget().getViewportGadget().getPrimaryChild()
## Frames the specified nodes in the viewport. If extend is True
# then the current framing will be extended to include the specified
# nodes, if False then the framing will be reset to frame only the
# nodes specified.
def frame( self, nodes, extend=False ) :
self.__frame( nodes, extend )
def getTitle( self ) :
title = super( NodeGraph, self ).getTitle()
if title:
return title
result = IECore.CamelCase.toSpaced( self.__class__.__name__ )
root = self.graphGadget().getRoot()
if not root.isSame( self.scriptNode() ) :
result += " : " + root.relativeName( self.scriptNode() ).replace( ".", " / " )
return result
__plugContextMenuSignal = Gaffer.Signal3()
## Returns a signal which is emitted to create a context menu for a
# plug in the graph. Slots may connect to this signal to edit the
# menu definition on the fly - the signature for the signal is
# ( nodeGraph, plug, menuDefinition ) and the menu definition should just be
# edited in place.
@classmethod
def plugContextMenuSignal( cls ) :
return cls.__plugContextMenuSignal
__connectionContextMenuSignal = Gaffer.Signal3()
## Returns a signal which is emitted to create a context menu for a
# connection in the graph. Slots may connect to this signal to edit the
# menu definition on the fly - the signature for the signal is
# ( nodeGraph, destinationPlug, menuDefinition ) and the menu definition
# should just be edited in place.
@classmethod
def connectionContextMenuSignal( cls ) :
return cls.__connectionContextMenuSignal
__nodeContextMenuSignal = Gaffer.Signal3()
## Returns a signal which is emitted to create a context menu for a
# node in the graph. Slots may connect to this signal to edit the
# menu definition on the fly - the signature for the signal is
# ( nodeGraph, node, menuDefinition ) and the menu definition should just be
# edited in place. Typically you would add slots to this signal
# as part of a startup script.
@classmethod
def nodeContextMenuSignal( cls ) :
return cls.__nodeContextMenuSignal
## May be used from a slot attached to nodeContextMenuSignal() to install some
# standard menu items for modifying the connection visibility for a node.
@classmethod
def appendConnectionVisibilityMenuDefinitions( cls, nodeGraph, node, menuDefinition ) :
menuDefinition.append( "/ConnectionVisibilityDivider", { "divider" : True } )
menuDefinition.append(
"/Show Input Connections",
{
"checkBox" : IECore.curry( cls.__getNodeInputConnectionsVisible, nodeGraph.graphGadget(), node ),
"command" : IECore.curry( cls.__setNodeInputConnectionsVisible, nodeGraph.graphGadget(), node )
}
)
menuDefinition.append(
"/Show Output Connections",
{
"checkBox" : IECore.curry( cls.__getNodeOutputConnectionsVisible, nodeGraph.graphGadget(), node ),
"command" : IECore.curry( cls.__setNodeOutputConnectionsVisible, nodeGraph.graphGadget(), node )
}
)
## May be used from a slot attached to nodeContextMenuSignal() to install a
# standard menu item for modifying the enabled state of a node.
@classmethod
def appendEnabledPlugMenuDefinitions( cls, nodeGraph, node, menuDefinition ) :
enabledPlug = node.enabledPlug() if isinstance( node, Gaffer.DependencyNode ) else None
if enabledPlug is not None :
menuDefinition.append( "/EnabledDivider", { "divider" : True } )
menuDefinition.append(
"/Enabled",
{
"command" : IECore.curry( cls.__setEnabled, node ),
"checkBox" : enabledPlug.getValue(),
"active" : enabledPlug.settable()
}
)
__nodeDoubleClickSignal = Gaffer.Signal2()
## Returns a signal which is emitted whenever a node is double clicked.
# Slots should have the signature ( nodeGraph, node ).
@classmethod
def nodeDoubleClickSignal( cls ) :
return cls.__nodeDoubleClickSignal
## Ensures that the specified node has a visible NodeGraph viewing
# it, and returns that editor.
## \todo Consider how this relates to the todo items in NodeSetEditor.acquire().
@classmethod
def acquire( cls, rootNode ) :
if isinstance( rootNode, Gaffer.ScriptNode ) :
script = rootNode
else :
script = rootNode.scriptNode()
scriptWindow = GafferUI.ScriptWindow.acquire( script )
tabbedContainer = None
for editor in scriptWindow.getLayout().editors( type = GafferUI.NodeGraph ) :
if rootNode.isSame( editor.graphGadget().getRoot() ) :
editor.parent().setCurrent( editor )
return editor
editor = NodeGraph( script )
editor.graphGadget().setRoot( rootNode )
scriptWindow.getLayout().addEditor( editor )
return editor
def __repr__( self ) :
return "GafferUI.NodeGraph( scriptNode )"
def _nodeMenu( self ) :
if self.__nodeMenu is None :
self.__nodeMenu = GafferUI.Menu( GafferUI.NodeMenu.acquire( self.scriptNode().applicationRoot() ).definition(), searchable=True )
self.__nodeMenuVisibilityChangedConnection = self.__nodeMenu.visibilityChangedSignal().connect( Gaffer.WeakMethod( self.__nodeMenuVisibilityChanged ) )
return self.__nodeMenu
def __nodeMenuVisibilityChanged( self, widget ) :
assert( widget is self.__nodeMenu )
if not self.__nodeMenu.visible() :
# generally we steal focus on mouse enter (implemented in GadgetWidget),
# but when the node menu closes we may not get an enter event, so we have to steal
# the focus back here.
self.__gadgetWidget._qtWidget().setFocus()
def __buttonPress( self, widget, event ) :
if event.buttons & GafferUI.ButtonEvent.Buttons.Right :
# right click - display either the node creation popup menu
# or a menu specific to the node/plug/connection under the
# mouse if possible.
viewport = self.__gadgetWidget.getViewportGadget()
gadgets = viewport.gadgetsAt( IECore.V2f( event.line.p1.x, event.line.p1.y ) )
if len( gadgets ) :
overrideMenuDefinition = IECore.MenuDefinition()
overrideMenuTitle = None
if isinstance( gadgets[0], GafferUI.Nodule ) :
self.plugContextMenuSignal()( self, gadgets[0].plug(), overrideMenuDefinition )
overrideMenuTitle = gadgets[0].plug().relativeName( self.graphGadget().getRoot() )
elif isinstance( gadgets[0], GafferUI.ConnectionGadget ) :
self.connectionContextMenuSignal()( self, gadgets[0].dstNodule().plug(), overrideMenuDefinition )
overrideMenuTitle = "-> " + gadgets[0].dstNodule().plug().relativeName( self.graphGadget().getRoot() )
else :
nodeGadget = gadgets[0]
if not isinstance( nodeGadget, GafferUI.NodeGadget ) :
nodeGadget = nodeGadget.ancestor( GafferUI.NodeGadget )
if nodeGadget is not None :
self.nodeContextMenuSignal()( self, nodeGadget.node(), overrideMenuDefinition )
overrideMenuTitle = nodeGadget.node().getName()
if len( overrideMenuDefinition.items() ) :
menuDefinition = overrideMenuDefinition
self._m = GafferUI.Menu( menuDefinition, title=overrideMenuTitle )
self._m.popup( self )
return True
self._nodeMenu().popup( self )
return True
return False
def __nodeGadgetAt( self, position ) :
viewport = self.__gadgetWidget.getViewportGadget()
line = viewport.rasterToGadgetSpace( IECore.V2f( position.x, position.y ), gadget = self.graphGadget() )
return self.graphGadget().nodeGadgetAt( line )
def __keyPress( self, widget, event ) :
if event.key == "F" and not event.modifiers :
self.__frame( self.scriptNode().selection() )
return True
## \todo This cursor key navigation might not make sense for all applications,
# so we should move it into BoxUI and load it in a config file that the gui app uses.
# I think this implies that every Widget.*Signal() method should have a
# Widget.static*Signal() method to allow global handlers to be registered by widget type.
# We already have a mix of static/nonstatic signals for menus, so that might make a nice
# generalisation.
elif event.key == "Down" :
selection = self.scriptNode().selection()
if selection.size() :
|
if isinstance( selection[0], Gaffer.Box ) or event.modifiers == event.modifiers.Shift | event.modifiers.Control :
self.graphGadget().setRoot( selection[0] )
return True
elif event.key == "Up" :
root = self.graphGadget().getRoot()
if not isinstance( root, Gaffer.ScriptNode ) :
self.graphGadget().setRoot( root.parent() )
return True
elif event.key == "Tab" :
self._nodeMenu().popup( self )
return True
return False
def __frame( self, nodes, extend = False ) :
graphGadget = self.graphGadget()
# get the bounds of the nodes
bound = IECore.Box3f()
for node in nodes :
nodeGadget = graphGadget.nodeGadget( node )
if nodeGadget :
bound.extendBy( nodeGadget.transformedBound( graphGadget ) )
# if there were no nodes then use the bound of the whole
# graph.
if bound.isEmpty() :
bound = graphGadget.bound()
# if there's still nothing then use an arbitrary area in the centre of the world
if bound.isEmpty() :
bound = IECore.Box3f( IECore.V3f( -10, -10, 0 ), IECore.V3f( 10, 10, 0 ) )
# pad it a little bit so
# it sits nicer in the frame
bound.min -= IECore.V3f( 1, 1, 0 )
bound.max += IECore.V3f( 1, 1, 0 )
if extend :
# we're extending the existing framing, which we assume the
# user was happy with other than it not showing the nodes in question.
# so we just take the union of the existing frame and the one for the nodes.
cb = self.__currentFrame()
bound.extendBy( IECore.Box3f( IECore.V3f( cb.min.x, cb.min.y, 0 ), IECore.V3f( cb.max.x, cb.max.y, 0 ) ) )
else :
# we're reframing from scratch, so the frame for the nodes is all we need.
# we do however want to make sure that we don't zoom in too far if the node
# bounds are small, as having a single node filling the screen is of little use -
# it's better to see some context around it.
boundSize = bound.size()
widgetSize = IECore.V3f( self._qtWidget().width(), self._qtWidget().height(), 0 )
pixelsPerUnit = widgetSize / boundSize
adjustedPixelsPerUnit = min( pixelsPerUnit.x, pixelsPerUnit.y, 10 )
newBoundSize = widgetSize / adjustedPixelsPerUnit
boundCenter = bound.center()
bound.min = boundCenter - newBoundSize / 2.0
bound.max = boundCenter + newBoundSize / 2.0
self.__gadgetWidget.getViewportGadget().frame( bound )
def __buttonDoubleClick( self, widget, event ) :
nodeGadget = self.__nodeGadgetAt( event.line.p1 )
if nodeGadget is not None :
return self.nodeDoubleClickSignal()( self, nodeGadget.node() )
def __dragEnter( self, widget, event ) :
if event.sourceWidget is self.__gadgetWidget :
return False
if self.__dropNodes( event.data ) :
return True
return False
def __drop( self, widget, event ) :
if event.sourceWidget is self.__gadgetWidget :
return False
dropNodes = self.__dropNodes( event.data )
if dropNodes :
self.__frame( dropNodes )
return True
return False
def __dropNodes( self, dragData ) :
if isinstance( dragData, Gaffer.Node ) :
return [ dragData ]
elif isinstance( dragData, Gaffer.Plug ) :
return [ dragData.node() ]
elif isinstance( dragData, Gaffer.Set ) :
return [ x for x in dragData if isinstance( x, Gaffer.Node ) ]
return []
def __currentFrame( self ) :
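# The visible frame is the camera's screen window shifted by the
# camera translation, giving the region currently framed in the graph.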
camera = self.graphGadgetWidget().getViewportGadget().getCamera()
frame = camera.parameters()["screenWindow"].value
translation = camera.getTransform().matrix.translation()
frame.min += IECore.V2f( translation.x, translation.y )
frame.max += IECore.V2f( translation.x, translation.y )
return frame
def __rootChanged( self, graphGadget, previousRoot ) :
# save/restore the current framing so jumping in
# and out of Boxes isn't a confusing experience.
Gaffer.Metadata.registerNodeValue( previousRoot, "ui:nodeGraph:framing", self.__currentFrame(), persistent = False )
frame = Gaffer.Metadata.nodeValue( self.graphGadget().getRoot(), "ui:nodeGraph:framing" )
if frame is not None :
self.graphGadgetWidget().getViewportGadget().frame(
IECore.Box3f( IECore.V3f( frame.min.x, frame.min.y, 0 ), IECore.V3f( frame.max.x, frame.max.y, 0 ) )
)
else :
self.__frame( self.graphGadget().getRoot().children( Gaffer.Node ) )
# do what we need to do to keep our title up to date.
if graphGadget.getRoot().isSame( self.scriptNode() ) :
self.__rootNameChangedConnection = None
self.__rootParentChangedConnection = None
else :
self.__rootNameChangedConnection = graphGadget.getRoot().nameChangedSignal().connect( Gaffer.WeakMethod( self.__rootNameChanged ) )
self.__rootParentChangedConnection = graphGadget.getRoot().parentChangedSignal().connect( Gaffer.WeakMethod( self.__rootParentChanged ) )
self.titleChangedSignal()( self )
def __rootNameChanged( self, root ) :
self.titleChangedSignal()( self )
def __rootParentChanged( self, root, oldParent ) :
# root has been deleted
## \todo I'm not sure if we should be responsible for removing ourselves or not.
# Perhaps we should just signal that we're not valid in some way and the CompoundEditor should
# remove us? Consider how this relates to NodeEditor.__deleteWindow() too.
self.parent().removeChild( self )
def __preRender( self, viewportGadget ) :
# Find all unpositioned nodes.
graphGadget = self.graphGadget()
nodes = [ g.node() for g in graphGadget.unpositionedNodeGadgets() ]
if not nodes :
return
nodes = Gaffer.StandardSet( nodes )
# Lay them out somewhere near the centre of frame.
gadgetWidget = self.graphGadgetWidget()
fallbackPosition = gadgetWidget.getViewportGadget().rasterToGadgetSpace(
IECore.V2f( gadgetWidget.size() ) / 2.0,
gadget = graphGadget
).p0
fallbackPosition = IECore.V2f( fallbackPosition.x, fallbackPosition.y )
graphGadget.getLayout().positionNodes( graphGadget, nodes, fallbackPosition )
graphGadget.getLayout().layoutNodes( graphGadget, nodes )
# And then extend the frame to include them, in case the
# layout has gone off screen.
self.frame( nodes, extend = True )
@classmethod
def __getNodeInputConnectionsVisible( cls, graphGadget, node ) :
return not graphGadget.getNodeInputConnectionsMinimised( node )
@classmethod
def __setNodeInputConnectionsVisible( cls, graphGadget, node, value ) :
with Gaffer.UndoContext( node.ancestor( Gaffer.ScriptNode ) ) :
graphGadget.setNodeInputConnectionsMinimised( node, not value )
@classmethod
def __getNodeOutputConnectionsVisible( cls, graphGadget, node ) :
return not graphGadget.getNodeOutputConnectionsMinimised( node )
@classmethod
def __setNodeOutputConnectionsVisible( cls, graphGadget, node, value ) :
with Gaffer.UndoContext( node.ancestor( Gaffer.ScriptNode ) ) :
graphGadget.setNodeOutputConnectionsMinimised( node, not value )
@classmethod
def __setEnabled( cls, node, value ) :
with Gaffer.UndoContext( node.ancestor( Gaffer.ScriptNode ) ) :
node.enabledPlug().setValue( value )
GafferUI.EditorWidget.registerType( "NodeGraph", NodeGraph )
| |
postgres.go
|
package postgres
import (
"database/sql"
"fmt"
"time"
// import go libpq driver package
_ "github.com/lib/pq"
"github.com/thrasher-corp/gocryptotrader/database"
)
// Connect opens a connection to Postgres database and returns a pointer to database.DB
func
|
() (*database.Instance, error) {
if database.DB.Config.SSLMode == "" {
database.DB.Config.SSLMode = "disable"
}
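// The assembled DSN has the form (illustrative values):
// postgres://user:pass@localhost:5432/dbname?sslmode=disable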
configDSN := fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=%s",
database.DB.Config.Username,
database.DB.Config.Password,
database.DB.Config.Host,
database.DB.Config.Port,
database.DB.Config.Database,
database.DB.Config.SSLMode)
db, err := sql.Open(database.DBPostgreSQL, configDSN)
if err != nil {
return nil, err
}
err = db.Ping()
if err != nil {
return nil, err
}
database.DB.SQL = db
database.DB.SQL.SetMaxOpenConns(2)
database.DB.SQL.SetMaxIdleConns(1)
database.DB.SQL.SetConnMaxLifetime(time.Hour)
return database.DB, nil
}
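// Example usage sketch (hypothetical; assumes the caller has populated
// database.DB.Config before calling Connect):
//
//	db, err := Connect()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer db.SQL.Close()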
|
Connect
|
encoding_test.go
|
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package encoding_test
import (
"io/ioutil"
"strings"
"testing"
"gx/ipfs/QmVcxhXDbXjNoAdmYBWbY1eU67kQ8eZUHjG4mAYZUtZZu3/go-text/encoding"
"gx/ipfs/QmVcxhXDbXjNoAdmYBWbY1eU67kQ8eZUHjG4mAYZUtZZu3/go-text/encoding/charmap"
"gx/ipfs/QmVcxhXDbXjNoAdmYBWbY1eU67kQ8eZUHjG4mAYZUtZZu3/go-text/transform"
)
func TestEncodeInvalidUTF8(t *testing.T) {
inputs := []string{
"hello.",
"wo\ufffdld.",
"ABC\xff\x80\x80", // Invalid UTF-8.
"\x80\x80\x80\x80\x80",
"\x80\x80D\x80\x80", // Valid rune at "D".
"E\xed\xa0\x80\xed\xbf\xbfF", // Two invalid UTF-8 runes (surrogates).
"G",
"H\xe2\x82", // U+20AC in UTF-8 is "\xe2\x82\xac", which we split over two
"\xacI\xe2\x82", // input lines. It maps to 0x80 in the Windows-1252 encoding.
}
// Each invalid source byte becomes '\x1a'.
want := strings.Replace("hello.wo?ld.ABC??????????D??E??????FGH\x80I??", "?", "\x1a", -1)
transformer := encoding.ReplaceUnsupported(charmap.Windows1252.NewEncoder())
gotBuf := make([]byte, 0, 1024)
src := make([]byte, 0, 1024)
for i, input := range inputs {
dst := make([]byte, 1024)
src = append(src, input...)
atEOF := i == len(inputs)-1
nDst, nSrc, err := transformer.Transform(dst, src, atEOF)
gotBuf = append(gotBuf, dst[:nDst]...)
src = src[nSrc:]
if err != nil && err != transform.ErrShortSrc {
t.Fatalf("i=%d: %v", i, err)
}
if atEOF && err != nil {
t.Fatalf("i=%d: atEOF: %v", i, err)
}
}
if got := string(gotBuf); got != want {
t.Fatalf("\ngot %+q\nwant %+q", got, want)
}
}
func TestReplacement(t *testing.T) {
for _, direction := range []string{"Decode", "Encode"} {
enc, want := (transform.Transformer)(nil), ""
if direction == "Decode" {
enc = encoding.Replacement.NewDecoder()
want = "\ufffd"
} else {
enc = encoding.Replacement.NewEncoder()
want = "AB\x00CD\ufffdYZ"
}
sr := strings.NewReader("AB\x00CD\x80YZ")
g, err := ioutil.ReadAll(transform.NewReader(sr, enc))
if err != nil {
t.Errorf("%s: ReadAll: %v", direction, err)
continue
}
if got := string(g); got != want {
t.Errorf("%s:\ngot %q\nwant %q", direction, got, want)
continue
}
}
}
func TestUTF8Validator(t *testing.T) {
testCases := []struct {
desc string
dstSize int
src string
atEOF bool
want string
wantErr error
}{
{
"empty input",
100,
"",
false,
"",
nil,
},
{
"valid 1-byte 1-rune input",
100,
"a",
false,
"a",
nil,
},
{
"valid 3-byte 1-rune input",
100,
"\u1234",
false,
"\u1234",
nil,
},
{
"valid 5-byte 3-rune input",
100,
"a\u0100\u0101",
false,
"a\u0100\u0101",
nil,
},
{
"perfectly sized dst (non-ASCII)",
5,
"a\u0100\u0101",
false,
"a\u0100\u0101",
nil,
},
{
"short dst (non-ASCII)",
4,
"a\u0100\u0101",
false,
"a\u0100",
transform.ErrShortDst,
},
{
"perfectly sized dst (ASCII)",
5,
"abcde",
false,
"abcde",
nil,
},
{
"short dst (ASCII)",
4,
"abcde",
false,
"abcd",
transform.ErrShortDst,
},
{
"partial input (!EOF)",
100,
"a\u0100\xf1",
false,
"a\u0100",
transform.ErrShortSrc,
},
{
"invalid input (EOF)",
100,
"a\u0100\xf1",
true,
"a\u0100",
encoding.ErrInvalidUTF8,
},
{
"invalid input (!EOF)",
100,
"a\u0100\x80",
false,
"a\u0100",
encoding.ErrInvalidUTF8,
},
{
"invalid input (above U+10FFFF)",
100,
"a\u0100\xf7\xbf\xbf\xbf",
false,
"a\u0100",
encoding.ErrInvalidUTF8,
},
{
"invalid input (surrogate half)",
100,
"a\u0100\xed\xa0\x80",
false,
"a\u0100",
encoding.ErrInvalidUTF8,
},
}
for _, tc := range testCases {
dst := make([]byte, tc.dstSize)
nDst, nSrc, err := encoding.UTF8Validator.Transform(dst, []byte(tc.src), tc.atEOF)
if nDst < 0 || len(dst) < nDst {
t.Errorf("%s: nDst=%d out of range", tc.desc, nDst)
continue
}
got := string(dst[:nDst])
if got != tc.want || nSrc != len(tc.want) || err != tc.wantErr {
t.Errorf("%s:\ngot %+q, %d, %v\nwant %+q, %d, %v",
tc.desc, got, nSrc, err, tc.want, len(tc.want), tc.wantErr)
continue
}
}
}
func TestErrorHandler(t *testing.T) {
testCases := []struct {
desc string
handler func(*encoding.Encoder) *encoding.Encoder
sizeDst int
src, want string
nSrc int
err error
}{
{
desc: "one rune replacement",
handler: encoding.ReplaceUnsupported,
sizeDst: 100,
src: "\uAC00",
want: "\x1a",
nSrc: 3,
},
{
desc: "mid-stream rune replacement",
handler: encoding.ReplaceUnsupported,
sizeDst: 100,
src: "a\uAC00bcd\u00e9",
want: "a\x1abcd\xe9",
nSrc: 9,
},
{
desc: "at end rune replacement",
handler: encoding.ReplaceUnsupported,
sizeDst: 10,
src: "\u00e9\uAC00",
want: "\xe9\x1a",
nSrc: 5,
},
{
desc: "short buffer replacement",
handler: encoding.ReplaceUnsupported,
sizeDst: 1,
src: "\u00e9\uAC00",
want: "\xe9",
nSrc: 2,
err: transform.ErrShortDst,
},
|
desc: "one rune html escape",
handler: encoding.HTMLEscapeUnsupported,
sizeDst: 100,
src: "\uAC00",
want: "가",
nSrc: 3,
},
{
desc: "mid-stream html escape",
handler: encoding.HTMLEscapeUnsupported,
sizeDst: 100,
src: "\u00e9\uAC00dcba",
want: "\xe9가dcba",
nSrc: 9,
},
{
desc: "short buffer html escape",
handler: encoding.HTMLEscapeUnsupported,
sizeDst: 9,
src: "ab\uAC01",
want: "ab",
nSrc: 2,
err: transform.ErrShortDst,
},
}
for i, tc := range testCases {
tr := tc.handler(charmap.Windows1250.NewEncoder())
b := make([]byte, tc.sizeDst)
nDst, nSrc, err := tr.Transform(b, []byte(tc.src), true)
if err != tc.err {
t.Errorf("%d:%s: error was %v; want %v", i, tc.desc, err, tc.err)
}
if got := string(b[:nDst]); got != tc.want {
t.Errorf("%d:%s: result was %q: want %q", i, tc.desc, got, tc.want)
}
if nSrc != tc.nSrc {
t.Errorf("%d:%s: nSrc was %d; want %d", i, tc.desc, nSrc, tc.nSrc)
}
}
}
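// Note on the chunked loop in TestEncodeInvalidUTF8 above: feeding the input
// one piece at a time exercises transform.ErrShortSrc handling, i.e. the
// encoder must hold back an incomplete UTF-8 sequence (such as "\xe2\x82"
// split across chunks) until the next chunk supplies the remaining bytes.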
|
{
|
password_generator.py
|
import random
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
print("Welcome to PyPassword Generator")
num_letters = int(input("How many letters would you like in your password? "))
num_symbols = int(input("How many symbols would you like in your password? "))
num_numbers = int(input("How many numbers would you like in your password? "))
# Easy version - fhfh^&23 (order not randomized)
password = ""
for _ in range(num_letters):
    password += random.choice(letters)
for _ in range(num_symbols):
    password += random.choice(symbols)
for _ in range(num_numbers):
    password += random.choice(numbers)
|
# Advanced version - g^2j8k& (random order)
password_list = []
for _ in range(num_letters):
    password_list.append(random.choice(letters))
for _ in range(num_symbols):
    password_list.append(random.choice(symbols))
for _ in range(num_numbers):
    password_list.append(random.choice(numbers))
# print(password_list)
random.shuffle(password_list)
# print(password_list)
password = ""
for char in password_list:
    password += char
print(f"Your Password : {password}")
|
# print("password")
|
data_module.py
|
import pytorch_lightning as pl
from torch.utils.data import DataLoader
class plDataModule(pl.LightningDataModule):
def __init__(
self,
train_dataset,
val_dataset,
test_dataset=None,
num_workers=2,
|
train_shuffle=True,
train_batch_size=64,
train_drop_last=False,
val_batch_size=16,
val_shuffle=False,
val_sampler=None,
train_dataloader=None,
val_dataloader=None,
test_dataloader=None,
):
super().__init__()
self.train_dataset = train_dataset
self.val_dataset = val_dataset
self.test_dataset = test_dataset
self.num_workers = num_workers
self.train_sampler = train_sampler
self.train_shuffle = train_shuffle
self.train_batch_size = train_batch_size
self.train_drop_last = train_drop_last
self.val_batch_size = val_batch_size
self.val_shuffle = val_shuffle
self.val_sampler = val_sampler
self.created_train_dataloader = train_dataloader
self.created_val_dataloader = val_dataloader
self.created_test_dataloader = test_dataloader
def train_dataloader(self):
if self.created_train_dataloader:
return self.created_train_dataloader
return DataLoader(
self.train_dataset,
batch_size=self.train_batch_size,
sampler=self.train_sampler,
drop_last=self.train_drop_last,
num_workers=self.num_workers,
shuffle=self.train_shuffle if not self.train_sampler else False,
)
def val_dataloader(self):
if self.created_val_dataloader:
return self.created_val_dataloader
return DataLoader(
self.val_dataset,
batch_size=self.val_batch_size,
sampler=self.val_sampler,
drop_last=False,
num_workers=self.num_workers,
shuffle=self.val_shuffle if not self.val_sampler else False,
)
def test_dataloader(self):
if self.created_test_dataloader:
return self.created_test_dataloader
if self.test_dataset:
return DataLoader(
self.test_dataset,
batch_size=self.val_batch_size,
sampler=self.val_sampler,
drop_last=False,
num_workers=self.num_workers,
shuffle=self.val_shuffle if not self.val_sampler else False,
)
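# Minimal usage sketch (hypothetical names; assumes `train_ds` and `val_ds`
# are torch Dataset instances and `model` is a LightningModule):
#
#   dm = plDataModule(train_ds, val_ds, train_batch_size=32)
#   trainer = pl.Trainer(max_epochs=1)
#   trainer.fit(model, datamodule=dm)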
|
train_sampler=None,
|
index.ts
|
export const users = [
{
id: "1",
name: "Ada Lovelace",
birthDate: "1815-12-10",
username: "@ada"
},
{
id: "2",
|
}
];
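// A hypothetical interface describing the shape of each entry above:
//
// interface User {
//   id: string;
//   name: string;
//   birthDate: string; // ISO 8601 date
//   username: string;
// }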
|
name: "Alan Turing",
birthDate: "1912-06-23",
username: "@complete"
|
cart.tsx
|
import React, { useContext } from 'react';
import MainLayout from '@/layouts/MainLayout';
import Head from 'next/head';
import { store } from '@/store/store';
import Link from 'next/link';
import ProductEditModal from '@/components/ProductEditModal';
import { IProduct } from '@/interfaces/product.interface';
import { numberFormat } from '@/utils/number-format';
import {
OpenEditCart,
RemoveCartItem,
ClearShoppingCart,
} from '../store/action';
|
state: { cart },
dispatch,
} = useContext(store);
const handleEditCart = (product: IProduct) => {
dispatch(OpenEditCart(product) as any);
};
const handleRemoveItem = (product: IProduct) => {
if (
// eslint-disable-next-line no-alert
window.confirm(
`Are you sure you want to remove ${product.name} from the shopping cart?`,
)
) {
dispatch(RemoveCartItem(product.id) as any);
}
};
const handleCheckout = () => {
dispatch(ClearShoppingCart() as any);
};
return (
<MainLayout>
<Head>
<title>Central e-commerce | Cart</title>
<link rel="icon" href="/favicon.ico" />
</Head>
<div className="card">
<div className="table-responsive">
<table className="table cart">
<thead>
<tr>
<th>#</th>
<th>Image</th>
<th className="text-left">Product name</th>
<th>@Qty</th>
<th className="text-right">Item price</th>
<th className="text-right">Total price</th>
<th className="text-right">Actions</th>
</tr>
</thead>
<tbody>
{cart?.products.length === 0 ? (
<tr>
<td colSpan={7}>
<p className="text-center">
You have no products in your shopping cart.
</p>
</td>
</tr>
) : null}
{cart?.products.map((product, index) => (
<tr key={product.id}>
<td className="text-center">{index + 1}</td>
<th>
<img
className="product-image"
src={`${product.image}?fake=${product.name}`}
alt={product.name}
/>
</th>
<td>
<strong>{product.name}</strong>
<br />
<span className="text-italic">{product.color}</span>
</td>
<td className="text-center">{product.quantity}</td>
<td className="text-right">{numberFormat(product.price)}</td>
<td className="text-right">
{numberFormat((product.quantity || 0) * product.price)}
</td>
<td className="text-right">
<button
className="button warning"
type="button"
onClick={() => handleEditCart(product)}
>
✏️
</button>
<button
className="button danger"
type="button"
onClick={() => handleRemoveItem(product)}
>
🗑️
</button>
</td>
</tr>
))}
</tbody>
<tfoot>
<tr>
<td colSpan={4} className="text-right">
Total Tax
</td>
<td colSpan={3} className="text-right">
{numberFormat(cart?.tax || 0)}
</td>
</tr>
<tr>
<td colSpan={4} className="text-right">
Total Price Incl.
</td>
<td colSpan={3} className="text-right">
{numberFormat(cart?.total || 0)}
</td>
</tr>
</tfoot>
</table>
</div>
</div>
<div className="text-right">
<Link href="/thankyou">
<button
className="button primary checkout"
onClick={handleCheckout}
type="button"
disabled={cart?.products.length === 0}
>
Checkout
</button>
</Link>
</div>
<ProductEditModal />
</MainLayout>
);
};
export default Cart;
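// Assumed (hypothetical) shape of the cart slice consumed above, inferred
// from usage rather than taken from the store definition:
//
// interface CartState {
//   products: IProduct[];
//   tax: number;
//   total: number;
// }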
|
const Cart = () => {
const {
|
translate.rs
|
// Copyright (c) The Diem Core Contributors
// Copyright (c) The Move Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
cfgir::{
self,
ast::{self as G, BasicBlock, BasicBlocks, BlockInfo},
cfg::BlockCFG,
},
diag,
expansion::ast::{AbilitySet, ModuleIdent},
hlir::ast::{self as H, Label, Value, Value_},
parser::ast::{ConstantName, FunctionName, StructName, Var},
shared::{unique_map::UniqueMap, CompilationEnv},
FullyCompiledProgram,
};
use cfgir::ast::LoopInfo;
use move_core_types::{account_address::AccountAddress as MoveAddress, value::MoveValue};
use move_ir_types::location::*;
use move_symbol_pool::Symbol;
use std::{
collections::{BTreeMap, BTreeSet},
mem,
};
//**************************************************************************************************
// Context
//**************************************************************************************************
struct Context<'env> {
env: &'env mut CompilationEnv,
struct_declared_abilities: UniqueMap<ModuleIdent, UniqueMap<StructName, AbilitySet>>,
start: Option<Label>,
loop_begin: Option<Label>,
loop_end: Option<Label>,
next_label: Option<Label>,
label_count: usize,
blocks: BasicBlocks,
block_ordering: BTreeMap<Label, usize>,
// Used for populating block_info
loop_bounds: BTreeMap<Label, G::LoopInfo>,
block_info: Vec<(Label, BlockInfo)>,
}
impl<'env> Context<'env> {
pub fn new(
env: &'env mut CompilationEnv,
pre_compiled_lib: Option<&FullyCompiledProgram>,
modules: &UniqueMap<ModuleIdent, H::ModuleDefinition>,
) -> Self {
let all_modules = modules
.key_cloned_iter()
.chain(pre_compiled_lib.iter().flat_map(|pre_compiled| {
pre_compiled
.hlir
.modules
.key_cloned_iter()
.filter(|(mident, _m)| !modules.contains_key(mident))
}));
let struct_declared_abilities = UniqueMap::maybe_from_iter(
all_modules
.map(|(m, mdef)| (m, mdef.structs.ref_map(|_s, sdef| sdef.abilities.clone()))),
)
.unwrap();
Context {
env,
struct_declared_abilities,
next_label: None,
loop_begin: None,
loop_end: None,
start: None,
label_count: 0,
blocks: BasicBlocks::new(),
block_ordering: BTreeMap::new(),
block_info: vec![],
loop_bounds: BTreeMap::new(),
}
}
fn new_label(&mut self) -> Label {
let count = self.label_count;
self.label_count += 1;
Label(count)
}
fn insert_block(&mut self, lbl: Label, basic_block: BasicBlock) {
assert!(self.block_ordering.insert(lbl, self.blocks.len()).is_none());
assert!(self.blocks.insert(lbl, basic_block).is_none());
let block_info = match self.loop_bounds.get(&lbl) {
None => BlockInfo::Other,
Some(info) => BlockInfo::LoopHead(info.clone()),
};
self.block_info.push((lbl, block_info));
}
// Returns the blocks inserted in insertion ordering
pub fn finish_blocks(&mut self) -> (Label, BasicBlocks, Vec<(Label, BlockInfo)>) {
self.next_label = None;
let start = mem::replace(&mut self.start, None);
let blocks = mem::take(&mut self.blocks);
let block_ordering = mem::take(&mut self.block_ordering);
let block_info = mem::take(&mut self.block_info);
self.loop_bounds = BTreeMap::new();
self.label_count = 0;
self.loop_begin = None;
self.loop_end = None;
// Blocks will eventually be ordered and output to bytecode by label, but labels are
// initially created depth first.
// So the labels need to be remapped based on the insertion order of the blocks.
// This preserves the original layout of the code as specified by the user (since code is
// finished+inserted into the map in original code order).
let remapping = block_ordering
.into_iter()
.map(|(lbl, ordering)| (lbl, Label(ordering)))
.collect();
let (start, blocks) = G::remap_labels(&remapping, start.unwrap(), blocks);
let block_info = block_info
.into_iter()
.map(|(lbl, info)| {
let info = match info {
BlockInfo::Other => BlockInfo::Other,
BlockInfo::LoopHead(G::LoopInfo {
is_loop_stmt,
loop_end,
}) => {
let loop_end = match loop_end {
G::LoopEnd::Unused => G::LoopEnd::Unused,
G::LoopEnd::Target(end) if remapping.contains_key(&end) => {
G::LoopEnd::Target(remapping[&end])
}
G::LoopEnd::Target(_end) => G::LoopEnd::Unused,
};
BlockInfo::LoopHead(G::LoopInfo {
is_loop_stmt,
loop_end,
})
}
};
(remapping[&lbl], info)
})
.collect();
(start, blocks, block_info)
}
}
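// Illustration of the remapping performed in finish_blocks (hypothetical
// labels): if depth-first creation produced labels L0, L1, L2 but the blocks
// were inserted in the order L0, L2, L1, the remapping is
// {L0 -> Label(0), L2 -> Label(1), L1 -> Label(2)}, so the emitted block
// order matches the original source order.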
//**************************************************************************************************
// Entry
//**************************************************************************************************
pub fn program(
compilation_env: &mut CompilationEnv,
pre_compiled_lib: Option<&FullyCompiledProgram>,
prog: H::Program,
) -> G::Program {
let H::Program {
modules: hmodules,
scripts: hscripts,
} = prog;
let mut context = Context::new(compilation_env, pre_compiled_lib, &hmodules);
let modules = modules(&mut context, hmodules);
let scripts = scripts(&mut context, hscripts);
G::Program { modules, scripts }
}
fn modules(
context: &mut Context,
hmodules: UniqueMap<ModuleIdent, H::ModuleDefinition>,
) -> UniqueMap<ModuleIdent, G::ModuleDefinition> {
let modules = hmodules
.into_iter()
.map(|(mname, m)| module(context, mname, m));
UniqueMap::maybe_from_iter(modules).unwrap()
}
fn module(
context: &mut Context,
module_ident: ModuleIdent,
mdef: H::ModuleDefinition,
) -> (ModuleIdent, G::ModuleDefinition) {
let H::ModuleDefinition {
package_name,
attributes,
is_source_module,
dependency_order,
friends,
structs,
functions: hfunctions,
constants: hconstants,
} = mdef;
let constants = hconstants.map(|name, c| constant(context, name, c));
let functions = hfunctions.map(|name, f| function(context, name, f));
(
module_ident,
G::ModuleDefinition {
package_name,
attributes,
is_source_module,
dependency_order,
friends,
structs,
constants,
functions,
},
)
}
fn scripts(
context: &mut Context,
hscripts: BTreeMap<Symbol, H::Script>,
) -> BTreeMap<Symbol, G::Script> {
hscripts
.into_iter()
.map(|(n, s)| (n, script(context, s)))
.collect()
}
fn script(context: &mut Context, hscript: H::Script) -> G::Script {
let H::Script {
package_name,
attributes,
loc,
constants: hconstants,
function_name,
function: hfunction,
} = hscript;
let constants = hconstants.map(|name, c| constant(context, name, c));
let function = function(context, function_name, hfunction);
G::Script {
package_name,
attributes,
loc,
constants,
function_name,
function,
}
}
//**************************************************************************************************
// Functions
//**************************************************************************************************
fn constant(context: &mut Context, _name: ConstantName, c: H::Constant) -> G::Constant {
let H::Constant {
attributes,
loc,
signature,
value: (locals, block),
} = c;
let final_value = constant_(context, loc, signature.clone(), locals, block);
let value = final_value.and_then(move_value_from_exp);
G::Constant {
attributes,
loc,
signature,
value,
}
}
const CANNOT_FOLD: &str =
"Invalid expression in 'const'. This expression could not be evaluated to a value";
fn constant_(
context: &mut Context,
full_loc: Loc,
signature: H::BaseType,
locals: UniqueMap<Var, H::SingleType>,
block: H::Block,
) -> Option<H::Exp> {
use H::Command_ as C;
const ICE_MSG: &str = "ICE invalid constant should have been blocked in typing";
initial_block(context, block);
let (start, mut blocks, block_info) = context.finish_blocks();
let (mut cfg, infinite_loop_starts, errors) = BlockCFG::new(start, &mut blocks, &block_info);
assert!(infinite_loop_starts.is_empty(), "{}", ICE_MSG);
assert!(errors.is_empty(), "{}", ICE_MSG);
let num_previous_errors = context.env.count_diags();
let fake_signature = H::FunctionSignature {
type_parameters: vec![],
parameters: vec![],
return_type: H::Type_::base(signature),
};
let fake_acquires = BTreeMap::new();
let fake_infinite_loop_starts = BTreeSet::new();
cfgir::refine_inference_and_verify(
context.env,
&context.struct_declared_abilities,
&fake_signature,
&fake_acquires,
&locals,
&mut cfg,
&fake_infinite_loop_starts,
);
assert!(
num_previous_errors == context.env.count_diags(),
"{}",
ICE_MSG
);
cfgir::optimize(&fake_signature, &locals, &mut cfg);
if blocks.len() != 1 {
context.env.add_diag(diag!(
BytecodeGeneration::UnfoldableConstant,
(full_loc, CANNOT_FOLD)
));
return None;
}
let mut optimized_block = blocks.remove(&start).unwrap();
let return_cmd = optimized_block.pop_back().unwrap();
for sp!(cloc, cmd_) in &optimized_block {
let e = match cmd_ {
C::IgnoreAndPop { exp, .. } => exp,
_ => {
context.env.add_diag(diag!(
BytecodeGeneration::UnfoldableConstant,
(*cloc, CANNOT_FOLD)
));
continue;
}
};
check_constant_value(context, e)
}
let result = match return_cmd.value {
C::Return { exp: e, .. } => e,
_ => unreachable!(),
};
check_constant_value(context, &result);
Some(result)
}
fn check_constant_value(context: &mut Context, e: &H::Exp) {
use H::UnannotatedExp_ as E;
match &e.exp.value {
E::Value(_) => (),
_ => context.env.add_diag(diag!(
BytecodeGeneration::UnfoldableConstant,
(e.exp.loc, CANNOT_FOLD)
)),
}
}
fn move_value_from_exp(e: H::Exp) -> Option<MoveValue> {
use H::UnannotatedExp_ as E;
match e.exp.value {
E::Value(v) => Some(move_value_from_value(v)),
_ => None,
}
}
pub(crate) fn move_value_from_value(sp!(_, v_): Value) -> MoveValue {
move_value_from_value_(v_)
}
pub(crate) fn move_value_from_value_(v_: Value_) -> MoveValue {
use MoveValue as MV;
use Value_ as V;
match v_ {
V::Address(a) => MV::Address(MoveAddress::new(a.into_bytes())),
V::U8(u) => MV::U8(u),
V::U64(u) => MV::U64(u),
V::U128(u) => MV::U128(u),
V::Bool(b) => MV::Bool(b),
V::Vector(_, vs) => MV::Vector(vs.into_iter().map(move_value_from_value).collect()),
}
}
//**************************************************************************************************
// Functions
//**************************************************************************************************
fn function(context: &mut Context, _name: FunctionName, f: H::Function) -> G::Function {
let attributes = f.attributes;
let visibility = f.visibility;
let signature = f.signature;
let acquires = f.acquires;
let body = function_body(context, &signature, &acquires, f.body);
G::Function {
attributes,
visibility,
signature,
acquires,
body,
}
}
fn function_body(
context: &mut Context,
signature: &H::FunctionSignature,
acquires: &BTreeMap<StructName, Loc>,
sp!(loc, tb_): H::FunctionBody,
) -> G::FunctionBody {
use G::FunctionBody_ as GB;
use H::FunctionBody_ as HB;
assert!(context.next_label.is_none());
assert!(context.start.is_none());
assert!(context.blocks.is_empty());
assert!(context.block_ordering.is_empty());
assert!(context.block_info.is_empty());
assert!(context.loop_bounds.is_empty());
assert!(context.loop_begin.is_none());
assert!(context.loop_end.is_none());
let b_ = match tb_ {
HB::Native => GB::Native,
HB::Defined { locals, body } => {
initial_block(context, body);
let (start, mut blocks, block_info) = context.finish_blocks();
let (mut cfg, infinite_loop_starts, diags) =
BlockCFG::new(start, &mut blocks, &block_info);
context.env.add_diags(diags);
cfgir::refine_inference_and_verify(
context.env,
&context.struct_declared_abilities,
signature,
acquires,
&locals,
&mut cfg,
&infinite_loop_starts,
);
if !context.env.has_diags() {
cfgir::optimize(signature, &locals, &mut cfg);
}
let loop_heads = block_info
.into_iter()
.filter(|(lbl, info)| {
matches!(info, BlockInfo::LoopHead(_)) && blocks.contains_key(lbl)
})
.map(|(lbl, _info)| lbl)
.collect();
GB::Defined {
locals,
start,
loop_heads,
blocks,
}
}
};
sp(loc, b_)
}
//**************************************************************************************************
// Statements
//**************************************************************************************************
fn initial_block(context: &mut Context, blocks: H::Block) {
let start = context.new_label();
context.start = Some(start);
block(context, start, blocks)
}
fn block(context: &mut Context, mut cur_label: Label, blocks: H::Block) {
use H::Command_ as C;
assert!(!blocks.is_empty());
let loc = blocks.back().unwrap().loc;
let mut basic_block = block_(context, &mut cur_label, blocks);
// return if we did not end up with any commands
if basic_block.is_empty() {
return;
}
match context.next_label {
Some(next) if !basic_block.back().unwrap().value.is_terminal() => {
basic_block.push_back(sp(
loc,
C::Jump {
target: next,
from_user: false,
},
));
}
_ => (),
}
context.insert_block(cur_label, basic_block);
}
fn block_(context: &mut Context, cur_label: &mut Label, blocks: H::Block) -> BasicBlock {
use H::{Command_ as C, Statement_ as S};
assert!(!blocks.is_empty());
let mut basic_block = BasicBlock::new();
macro_rules! finish_block {
(next_label: $next_label:expr) => {{
let lbl = mem::replace(cur_label, $next_label);
let bb = mem::take(&mut basic_block);
context.insert_block(lbl, bb);
}};
}
macro_rules! loop_block {
(begin: $begin:expr, end: $end:expr, body: $body:expr, $block:expr) => {{
let begin = $begin;
let old_begin = mem::replace(&mut context.loop_begin, Some(begin));
let old_end = mem::replace(&mut context.loop_end, Some($end));
let old_next = mem::replace(&mut context.next_label, Some(begin));
block(context, $body, $block);
context.next_label = old_next;
context.loop_end = old_end;
context.loop_begin = old_begin;
}};
}
for sp!(loc, stmt_) in blocks {
match stmt_ {
S::Command(mut cmd) => {
command(context, &mut cmd);
let is_terminal = cmd.value.is_terminal();
basic_block.push_back(cmd);
if is_terminal
|
}
S::IfElse {
cond,
if_block,
else_block,
} => {
let if_true = context.new_label();
let if_false = context.new_label();
let next_label = context.new_label();
// If cond
let jump_if = C::JumpIf {
cond: *cond,
if_true,
if_false,
};
basic_block.push_back(sp(loc, jump_if));
finish_block!(next_label: next_label);
// If branches
let old_next = mem::replace(&mut context.next_label, Some(next_label));
block(context, if_true, if_block);
block(context, if_false, else_block);
context.next_label = old_next;
}
S::While {
cond: (hcond_block, cond),
block: loop_block,
} => {
let loop_cond = context.new_label();
let loop_body = context.new_label();
let loop_end = context.new_label();
context.loop_bounds.insert(
loop_cond,
LoopInfo {
is_loop_stmt: false,
loop_end: G::LoopEnd::Target(loop_end),
},
);
// Jump to loop condition
basic_block.push_back(sp(
loc,
C::Jump {
target: loop_cond,
from_user: false,
},
));
finish_block!(next_label: loop_cond);
// Loop condition and case to jump into loop or end
if !hcond_block.is_empty() {
assert!(basic_block.is_empty());
basic_block = block_(context, cur_label, hcond_block);
}
let jump_if = C::JumpIf {
cond: *cond,
if_true: loop_body,
if_false: loop_end,
};
basic_block.push_back(sp(loc, jump_if));
finish_block!(next_label: loop_end);
// Loop body
loop_block!(begin: loop_cond, end: loop_end, body: loop_body, loop_block)
}
S::Loop {
block: loop_block, ..
} => {
let loop_body = context.new_label();
let loop_end = context.new_label();
assert!(cur_label.0 < loop_body.0);
assert!(loop_body.0 < loop_end.0);
context.loop_bounds.insert(
loop_body,
LoopInfo {
is_loop_stmt: true,
loop_end: G::LoopEnd::Target(loop_end),
},
);
// Jump to loop
basic_block.push_back(sp(
loc,
C::Jump {
target: loop_body,
from_user: false,
},
));
finish_block!(next_label: loop_end);
// Loop body
loop_block!(begin: loop_body, end: loop_end, body: loop_body, loop_block)
}
}
}
basic_block
}
fn command(context: &Context, sp!(_, hc_): &mut H::Command) {
use H::Command_ as C;
match hc_ {
C::Assign(_, _)
| C::Mutate(_, _)
| C::Abort(_)
| C::Return { .. }
| C::IgnoreAndPop { .. } => {}
C::Continue => {
*hc_ = C::Jump {
target: context.loop_begin.unwrap(),
from_user: true,
}
}
C::Break => {
*hc_ = C::Jump {
target: context.loop_end.unwrap(),
from_user: true,
}
}
C::Jump { .. } | C::JumpIf { .. } => {
panic!("ICE unexpected jump before translation to jumps")
}
}
}
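// Note: after `command` runs, `Break` and `Continue` have been lowered to
// `Jump`s targeting the enclosing loop's begin/end labels, so later passes
// only ever see explicit jumps.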
|
{
finish_block!(next_label: context.new_label());
}
|
client_monitored_item_impl.js
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ClientMonitoredItemImpl = void 0;
/**
* @module node-opcua-client-private
*/
// tslint:disable:unified-signatures
// tslint:disable:no-empty
const events_1 = require("events");
const node_opcua_assert_1 = require("node-opcua-assert");
const node_opcua_data_model_1 = require("node-opcua-data-model");
const node_opcua_data_value_1 = require("node-opcua-data-value");
const node_opcua_debug_1 = require("node-opcua-debug");
const node_opcua_service_filter_1 = require("node-opcua-service-filter");
const node_opcua_service_read_1 = require("node-opcua-service-read");
const node_opcua_service_subscription_1 = require("node-opcua-service-subscription");
const node_opcua_status_code_1 = require("node-opcua-status-code");
const client_monitored_item_1 = require("../client_monitored_item");
const client_monitored_item_toolbox_1 = require("../client_monitored_item_toolbox");
const client_subscription_impl_1 = require("./client_subscription_impl");
const debugLog = (0, node_opcua_debug_1.make_debugLog)(__filename);
const doDebug = (0, node_opcua_debug_1.checkDebugFlag)(__filename);
/**
* ClientMonitoredItem
* @class ClientMonitoredItem
* @extends ClientMonitoredItemBase
*
* event:
* "initialized"
* "err"
* "changed"
*
* note: this.monitoringMode = subscription_service.MonitoringMode.Reporting;
*/
class ClientMonitoredItemImpl extends events_1.EventEmitter {
constructor(subscription, itemToMonitor, monitoringParameters, timestampsToReturn, monitoringMode = node_opcua_service_subscription_1.MonitoringMode.Reporting) {
super();
this.statusCode = node_opcua_status_code_1.StatusCodes.BadDataUnavailable;
(0, node_opcua_assert_1.assert)(subscription.constructor.name === "ClientSubscriptionImpl");
this.subscription = subscription;
this.itemToMonitor = new node_opcua_service_read_1.ReadValueId(itemToMonitor);
this.monitoringParameters = new node_opcua_service_subscription_1.MonitoringParameters(monitoringParameters);
this.monitoringMode = monitoringMode;
(0, node_opcua_assert_1.assert)(this.monitoringParameters.clientHandle === 0xffffffff, "should not have a client handle yet");
(0, node_opcua_assert_1.assert)(subscription.session, "expecting session");
timestampsToReturn = (0, node_opcua_data_value_1.coerceTimestampsToReturn)(timestampsToReturn);
(0, node_opcua_assert_1.assert)(subscription.constructor.name === "ClientSubscriptionImpl");
this.timestampsToReturn = timestampsToReturn;
}
toString() {
var _a, _b;
let ret = "";
ret += "itemToMonitor: " + this.itemToMonitor.toString() + "\n";
ret += "monitoringParameters: " + this.monitoringParameters.toString() + "\n";
ret += "timestampsToReturn: " + this.timestampsToReturn.toString() + "\n";
ret += "itemToMonitor " + this.itemToMonitor.nodeId + "\n";
ret += "statusCode " + ((_a = this.statusCode) === null || _a === void 0 ? void 0 : _a.toString()) + "\n";
ret += "result =" + ((_b = this.result) === null || _b === void 0 ? void 0 : _b.toString()) + "\n";
return ret;
}
terminate(...args) {
const done = args[0];
(0, node_opcua_assert_1.assert)(typeof done === "function");
const subscription = this.subscription;
subscription._delete_monitored_items([this], (err) => {
if (done) {
done(err);
}
});
}
modify(...args) {
if (args.length === 2) {
return this.modify(args[0], null, args[1]);
}
const parameters = args[0];
const timestampsToReturn = args[1];
const callback = args[2];
this.timestampsToReturn = timestampsToReturn || this.timestampsToReturn;
client_monitored_item_toolbox_1.ClientMonitoredItemToolbox._toolbox_modify(this.subscription, [this], parameters, this.timestampsToReturn, (err, results) => {
if (err) {
return callback(err);
}
if (!results) {
return callback(new Error("internal error"));
}
(0, node_opcua_assert_1.assert)(results.length === 1);
callback(null, results[0]);
});
}
setMonitoringMode(...args) {
const monitoringMode = args[0];
const callback = args[1];
client_monitored_item_toolbox_1.ClientMonitoredItemToolbox._toolbox_setMonitoringMode(this.subscription, [this], monitoringMode, (err, statusCodes) => {
callback(err ? err : null, statusCodes[0]);
});
}
/**
|
* @private
*/
_notify_value_change(value) {
var _a;
// It is possible that the first notification arrives before the CreateMonitoredItemsRequest has been
// fully processed. In this case we need to put the dataValue aside so we can emit the "changed"
// notification after the node-opcua client has had time to fully install the on("changed") event handler.
if (((_a = this.statusCode) === null || _a === void 0 ? void 0 : _a.value) === node_opcua_status_code_1.StatusCodes.BadDataUnavailable.value) {
this._pendingDataValue = this._pendingDataValue || [];
this._pendingDataValue.push(value);
return;
}
/**
* Notify the observers that the MonitoredItem value has changed on the server side.
* @event changed
* @param value
*/
try {
this.emit("changed", value);
}
catch (err) {
debugLog("Exception raised inside the event handler called by ClientMonitoredItem.on('change')", err);
debugLog("Please verify the application using this node-opcua client");
}
}
/**
* @internal
* @param eventFields
* @private
*/
_notify_event(eventFields) {
var _a;
if (((_a = this.statusCode) === null || _a === void 0 ? void 0 : _a.value) === node_opcua_status_code_1.StatusCodes.BadDataUnavailable.value) {
this._pendingEvents = this._pendingEvents || [];
this._pendingEvents.push(eventFields);
return;
}
/**
* Notify the observers that the MonitoredItem value has changed on the server side.
* @event changed
* @param value
*/
try {
this.emit("changed", eventFields);
}
catch (err) {
debugLog("Exception raised inside the event handler called by ClientMonitoredItem.on('change')", err);
debugLog("Please verify the application using this node-opcua client");
}
}
/**
* @internal
* @private
*/
_prepare_for_monitoring() {
(0, node_opcua_assert_1.assert)(this.monitoringParameters.clientHandle === 4294967295, "should not have a client handle yet");
const subscription = this.subscription;
this.monitoringParameters.clientHandle = subscription.nextClientHandle();
(0, node_opcua_assert_1.assert)(this.monitoringParameters.clientHandle > 0 && this.monitoringParameters.clientHandle !== 4294967295);
// If attributeId is EventNotifier then monitoring parameters need a filter.
// The filter must then either be DataChangeFilter, EventFilter or AggregateFilter.
// todo can be done in another way?
// todo implement AggregateFilter
// todo support DataChangeFilter
// todo support whereClause
if (this.itemToMonitor.attributeId === node_opcua_data_model_1.AttributeIds.EventNotifier) {
//
// see OPCUA Spec 1.02 part 4 page 65 : 5.12.1.4 Filter
// see part 4 page 130: 7.16.3 EventFilter
// part 3 page 11 : 4.6 Event Model
// To monitor for Events, the attributeId element of the ReadValueId structure is
// the id of the EventNotifier attribute
// OPC Unified Architecture 1.02, Part 4 5.12.1.2 Sampling interval page 64:
// "A Client shall define a sampling interval of 0 if it subscribes for Events."
// TODO
// note : the EventFilter is used when monitoring Events.
this.monitoringParameters.filter = this.monitoringParameters.filter || new node_opcua_service_filter_1.EventFilter({});
const filter = this.monitoringParameters.filter;
// istanbul ignore next
if (!filter) {
return { error: "Internal Error" };
}
if (filter.schema.name !== "EventFilter") {
return {
error: "Mismatch between attributeId and filter in monitoring parameters : " +
"Got a " +
filter.schema.name +
" but a EventFilter object is required " +
"when itemToMonitor.attributeId== AttributeIds.EventNotifier"
};
}
}
else if (this.itemToMonitor.attributeId === node_opcua_data_model_1.AttributeIds.Value) {
// the DataChangeFilter and the AggregateFilter are used when monitoring Variable Values
// The Value Attribute is used when monitoring Variables. Variable values are monitored for a change
// in value or a change in their status. The filters defined in this standard (see 7.16.2) and in Part 8 are
// used to determine if the value change is large enough to cause a Notification to be generated for the item.
// TODO: check 'DataChangeFilter' && 'AggregateFilter'
}
else {
if (this.monitoringParameters.filter) {
return {
error: "Mismatch between attributeId and filter in monitoring parameters : " +
"no filter expected when attributeId is not Value or EventNotifier"
};
}
}
return {
itemToMonitor: this.itemToMonitor,
monitoringMode: this.monitoringMode,
requestedParameters: this.monitoringParameters
};
}
/**
* @internal
* @param monitoredItemResult
* @private
*/
_applyResult(monitoredItemResult) {
this.statusCode = monitoredItemResult.statusCode;
/* istanbul ignore else */
if (monitoredItemResult.statusCode === node_opcua_status_code_1.StatusCodes.Good) {
this.result = monitoredItemResult;
this.monitoredItemId = monitoredItemResult.monitoredItemId;
this.monitoringParameters.samplingInterval = monitoredItemResult.revisedSamplingInterval;
this.monitoringParameters.queueSize = monitoredItemResult.revisedQueueSize;
this.filterResult = monitoredItemResult.filterResult || undefined;
}
// Some PublishRequests carrying DataChangeNotifications might have been sent by the server before the monitored
// item was fully initialized; it is now time to process any pending notifications that were put on hold.
if (this._pendingDataValue) {
const dataValues = this._pendingDataValue;
this._pendingDataValue = undefined;
setImmediate(() => {
dataValues.map((dataValue) => this._notify_value_change(dataValue));
});
}
if (this._pendingEvents) {
const events = this._pendingEvents;
this._pendingEvents = undefined;
setImmediate(() => {
events.map((event) => this._notify_event(event));
});
}
}
_before_create() {
const subscription = this.subscription;
subscription._add_monitored_item(this.monitoringParameters.clientHandle, this);
}
/**
* @internal
* @param monitoredItemResult
* @private
*/
_after_create(monitoredItemResult) {
this._applyResult(monitoredItemResult);
if (this.statusCode === node_opcua_status_code_1.StatusCodes.Good) {
/**
* Notify the observers that the monitored item is now fully initialized.
* @event initialized
*/
this.emit("initialized");
}
else {
/**
* Notify the observers that the monitored item has failed to initialize.
* @event err
* @param statusCode {StatusCode}
*/
const err = new Error(monitoredItemResult.statusCode.toString());
this._terminate_and_emit(err);
}
}
_terminate_and_emit(err) {
if (this.statusCode.value === node_opcua_status_code_1.StatusCodes.Bad.value) {
return; // already terminated
}
if (err) {
this.emit("err", err.message);
}
(0, node_opcua_assert_1.assert)(!this._terminated);
this._terminated = true;
/**
* Notify the observer that this monitored item has been terminated.
* @event terminated
*/
this.emit("terminated", err);
this.removeAllListeners();
this.statusCode = node_opcua_status_code_1.StatusCodes.Bad;
// also remove from subscription
const clientHandle = this.monitoringParameters.clientHandle;
delete this.subscription.monitoredItems[clientHandle];
}
}
exports.ClientMonitoredItemImpl = ClientMonitoredItemImpl;
// tslint:disable:no-var-requires
// tslint:disable:max-line-length
const thenify = require("thenify");
const opts = { multiArgs: false };
ClientMonitoredItemImpl.prototype.terminate = thenify.withCallback(ClientMonitoredItemImpl.prototype.terminate);
ClientMonitoredItemImpl.prototype.setMonitoringMode = thenify.withCallback(ClientMonitoredItemImpl.prototype.setMonitoringMode);
ClientMonitoredItemImpl.prototype.modify = thenify.withCallback(ClientMonitoredItemImpl.prototype.modify);
client_monitored_item_1.ClientMonitoredItem.create = (subscription, itemToMonitor, monitoringParameters, timestampsToReturn, monitoringMode = node_opcua_service_subscription_1.MonitoringMode.Reporting) => {
return (0, client_subscription_impl_1.ClientMonitoredItem_create)(subscription, itemToMonitor, monitoringParameters, timestampsToReturn, monitoringMode);
};
//# sourceMappingURL=client_monitored_item_impl.js.map
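// Hypothetical usage sketch (the monitor() entry point is assumed from the
// wider node-opcua client API, not defined in this file):
//
//   const item = await subscription.monitor(
//       { nodeId: "ns=1;s=Temperature", attributeId: AttributeIds.Value },
//       { samplingInterval: 100, queueSize: 10, discardOldest: true },
//       TimestampsToReturn.Both);
//   item.on("changed", (dataValue) => console.log(dataValue.value.value));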
|
* @internal
* @param value
|
LinkClickedEventHandler.py
|
class LinkClickedEventHandler(MulticastDelegate,ICloneable,ISerializable):
"""
Represents the method that will handle the System.Windows.Forms.RichTextBox.LinkClicked event of a System.Windows.Forms.RichTextBox.
LinkClickedEventHandler(object: object,method: IntPtr)
"""
def BeginInvoke(self,sender,e,callback,object):
""" BeginInvoke(self: LinkClickedEventHandler,sender: object,e: LinkClickedEventArgs,callback: AsyncCallback,object: object) -> IAsyncResult """
pass
def CombineImpl(self,*args):
"""
CombineImpl(self: MulticastDelegate,follow: Delegate) -> Delegate
Combines this System.Delegate with the specified System.Delegate to form a new delegate.
|
Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
"""
pass
def DynamicInvokeImpl(self,*args):
"""
DynamicInvokeImpl(self: Delegate,args: Array[object]) -> object
Dynamically invokes (late-bound) the method represented by the current delegate.
args: An array of objects that are the arguments to pass to the method represented by the current
delegate.-or- null,if the method represented by the current delegate does not require
arguments.
Returns: The object returned by the method represented by the delegate.
"""
pass
def EndInvoke(self,result):
""" EndInvoke(self: LinkClickedEventHandler,result: IAsyncResult) """
pass
def GetMethodImpl(self,*args):
"""
GetMethodImpl(self: MulticastDelegate) -> MethodInfo
Returns a static method represented by the current System.MulticastDelegate.
Returns: A static method represented by the current System.MulticastDelegate.
"""
pass
def Invoke(self,sender,e):
""" Invoke(self: LinkClickedEventHandler,sender: object,e: LinkClickedEventArgs) """
pass
def RemoveImpl(self,*args):
"""
RemoveImpl(self: MulticastDelegate,value: Delegate) -> Delegate
Removes an element from the invocation list of this System.MulticastDelegate that is equal to
the specified delegate.
value: The delegate to search for in the invocation list.
Returns: If value is found in the invocation list for this instance,then a new System.Delegate without
value in its invocation list; otherwise,this instance with its original invocation list.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,object,method):
""" __new__(cls: type,object: object,method: IntPtr) """
pass
def __reduce_ex__(self,*args):
pass
|
follow: The delegate to combine with this delegate.
|
client.rs
|
use std::fmt;
use std::sync::Arc;
use std::time::Duration;
use std::thread;
use std::net::IpAddr;
use futures::{Async, Future, Stream};
use futures::future::{self, Either};
use futures::sync::{mpsc, oneshot};
use request::{Request, RequestBuilder};
use response::Response;
use {async_impl, header, Method, IntoUrl, Proxy, RedirectPolicy, wait};
#[cfg(feature = "tls")]
use {Certificate, Identity};
#[cfg(feature = "trust-dns")]
use {LookupIpStrategy};
/// A `Client` to make Requests with.
///
/// The Client has various configuration values to tweak, but the defaults
/// are set to what is usually the most commonly desired value. To configure a
/// `Client`, use `Client::builder()`.
///
/// The `Client` holds a connection pool internally, so it is advised that
/// you create one and **reuse** it.
///
/// # Examples
///
/// ```rust
/// # use reqwest::{Error, Client};
/// #
/// # fn run() -> Result<(), Error> {
/// let client = Client::new();
/// let resp = client.get("http://httpbin.org/").send()?;
/// # drop(resp);
/// # Ok(())
/// # }
///
/// ```
#[derive(Clone)]
pub struct Client {
inner: ClientHandle,
}
/// A `ClientBuilder` can be used to create a `Client` with custom configuration.
///
/// # Example
///
/// ```
/// # fn run() -> Result<(), reqwest::Error> {
/// use std::time::Duration;
///
/// let client = reqwest::Client::builder()
/// .gzip(true)
/// .timeout(Duration::from_secs(10))
/// .build()?;
/// # Ok(())
/// # }
/// ```
pub struct ClientBuilder {
inner: async_impl::ClientBuilder,
timeout: Timeout,
}
impl ClientBuilder {
/// Constructs a new `ClientBuilder`.
///
/// This is the same as `Client::builder()`.
pub fn new() -> ClientBuilder {
ClientBuilder {
inner: async_impl::ClientBuilder::new(),
timeout: Timeout::default(),
}
}
/// Returns a `Client` that uses this `ClientBuilder` configuration.
///
/// # Errors
///
/// This method fails if the TLS backend cannot be initialized, or the resolver
/// cannot load the system configuration.
pub fn build(self) -> ::Result<Client> {
ClientHandle::new(self).map(|handle| Client {
inner: handle,
})
}
/// Set that all sockets have `SO_NODELAY` set to `true`.
pub fn tcp_nodelay(self) -> ClientBuilder {
self.with_inner(move |inner| inner.tcp_nodelay())
}
/// Use native TLS backend.
#[cfg(feature = "default-tls")]
pub fn use_default_tls(self) -> ClientBuilder {
self.with_inner(move |inner| inner.use_default_tls())
}
/// Use rustls TLS backend.
#[cfg(feature = "rustls-tls")]
pub fn use_rustls_tls(self) -> ClientBuilder {
self.with_inner(move |inner| inner.use_rustls_tls())
}
/// Add a custom root certificate.
///
/// This allows connecting to a server that has a self-signed
/// certificate for example. This **does not** replace the existing
/// trusted store.
///
/// # Example
/// ```
/// # use std::fs::File;
/// # use std::io::Read;
/// # fn build_client() -> Result<(), Box<std::error::Error>> {
/// // read a local binary DER encoded certificate
/// let mut buf = Vec::new();
/// File::open("my-cert.der")?.read_to_end(&mut buf)?;
///
/// // create a certificate
/// let cert = reqwest::Certificate::from_der(&buf)?;
///
/// // get a client builder
/// let client = reqwest::Client::builder()
/// .add_root_certificate(cert)
/// .build()?;
/// # drop(client);
/// # Ok(())
/// # }
/// ```
///
/// # Errors
///
/// This method fails if adding root certificate was unsuccessful.
#[cfg(feature = "tls")]
pub fn add_root_certificate(self, cert: Certificate) -> ClientBuilder {
self.with_inner(move |inner| inner.add_root_certificate(cert))
}
/// Sets the identity to be used for client certificate authentication.
///
/// # Example
///
/// ```
/// # use std::fs::File;
/// # use std::io::Read;
/// # fn build_client() -> Result<(), Box<std::error::Error>> {
/// // read a local PKCS12 bundle
/// let mut buf = Vec::new();
///
/// #[cfg(feature = "default-tls")]
/// File::open("my-ident.pfx")?.read_to_end(&mut buf)?;
/// #[cfg(feature = "rustls-tls")]
/// File::open("my-ident.pem")?.read_to_end(&mut buf)?;
///
/// #[cfg(feature = "default-tls")]
/// // create an Identity from the PKCS#12 archive
/// let pkcs12 = reqwest::Identity::from_pkcs12_der(&buf, "my-privkey-password")?;
/// #[cfg(feature = "rustls-tls")]
/// // create an Identity from the PEM file
/// let pkcs12 = reqwest::Identity::from_pem(&buf)?;
///
/// // get a client builder
/// let client = reqwest::Client::builder()
/// .identity(pkcs12)
/// .build()?;
/// # drop(client);
/// # Ok(())
/// # }
/// ```
#[cfg(feature = "tls")]
pub fn identity(self, identity: Identity) -> ClientBuilder {
self.with_inner(move |inner| inner.identity(identity))
}
/// Controls the use of hostname verification.
///
/// Defaults to `false`.
///
/// # Warning
///
/// You should think very carefully before you use this method. If
/// hostname verification is not used, any valid certificate for any
/// site will be trusted for use from any other. This introduces a
/// significant vulnerability to man-in-the-middle attacks.
#[cfg(feature = "default-tls")]
pub fn danger_accept_invalid_hostnames(self, accept_invalid_hostname: bool) -> ClientBuilder {
self.with_inner(|inner| inner.danger_accept_invalid_hostnames(accept_invalid_hostname))
}
/// Controls the use of certificate validation.
///
/// Defaults to `false`.
///
/// # Warning
///
/// You should think very carefully before using this method. If
/// invalid certificates are trusted, *any* certificate for *any* site
/// will be trusted for use. This includes expired certificates. This
/// introduces significant vulnerabilities, and should only be used
/// as a last resort.
#[cfg(feature = "tls")]
pub fn danger_accept_invalid_certs(self, accept_invalid_certs: bool) -> ClientBuilder {
self.with_inner(|inner| inner.danger_accept_invalid_certs(accept_invalid_certs))
}
/// Sets the default headers for every request.
///
/// # Example
///
/// ```rust
/// use reqwest::header;
/// # fn build_client() -> Result<(), Box<std::error::Error>> {
/// let mut headers = header::HeaderMap::new();
/// headers.insert(header::AUTHORIZATION, header::HeaderValue::from_static("secret"));
///
/// // get a client builder
/// let client = reqwest::Client::builder()
/// .default_headers(headers)
/// .build()?;
/// let res = client.get("https://www.rust-lang.org").send()?;
/// # Ok(())
/// # }
/// ```
///
/// Override the default headers:
///
/// ```rust
/// use reqwest::header;
/// # fn build_client() -> Result<(), Box<std::error::Error>> {
/// let mut headers = header::HeaderMap::new();
/// headers.insert(header::AUTHORIZATION, header::HeaderValue::from_static("secret"));
///
/// // get a client builder
/// let client = reqwest::Client::builder()
/// .default_headers(headers)
/// .build()?;
/// let res = client
/// .get("https://www.rust-lang.org")
/// .header(header::AUTHORIZATION, "token")
/// .send()?;
/// # Ok(())
/// # }
/// ```
pub fn default_headers(self, headers: header::HeaderMap) -> ClientBuilder {
self.with_inner(move |inner| inner.default_headers(headers))
}
/// Enable auto gzip decompression by checking the ContentEncoding response header.
///
/// If auto gzip decompression is turned on:
/// - When sending a request, if the request's headers do not already contain
/// `Accept-Encoding` **and** `Range` values, the `Accept-Encoding` header is set to `gzip`.
/// The request body is **not** automatically compressed.
/// - When receiving a response, if its headers contain a `Content-Encoding` value equal
/// to `gzip`, both the `Content-Encoding` and `Content-Length` headers are removed from the
/// headers' set. The body is automatically decompressed.
///
/// Default is enabled.
pub fn gzip(self, enable: bool) -> ClientBuilder {
self.with_inner(|inner| inner.gzip(enable))
}
/// Add a `Proxy` to the list of proxies the `Client` will use.
pub fn proxy(self, proxy: Proxy) -> ClientBuilder {
self.with_inner(move |inner| inner.proxy(proxy))
}
/// Set a `RedirectPolicy` for this client.
///
/// Default will follow redirects up to a maximum of 10.
pub fn redirect(self, policy: RedirectPolicy) -> ClientBuilder {
self.with_inner(move |inner| inner.redirect(policy))
}
/// Enable or disable automatic setting of the `Referer` header.
///
/// Default is `true`.
pub fn referer(self, enable: bool) -> ClientBuilder {
self.with_inner(|inner| inner.referer(enable))
}
/// Set a timeout for connect, read and write operations of a `Client`.
///
/// Default is 30 seconds.
///
/// Pass `None` to disable timeout.
pub fn timeout<T>(mut self, timeout: T) -> ClientBuilder
where
T: Into<Option<Duration>>,
{
self.timeout = Timeout(timeout.into());
self
}
/// Sets the maximum idle connection per host allowed in the pool.
///
/// Default is usize::MAX (no limit).
pub fn max_idle_per_host(self, max: usize) -> ClientBuilder {
self.with_inner(move |inner| inner.max_idle_per_host(max))
}
/// Set a timeout for only the connect phase of a `Client`.
///
/// Default is `None`.
pub fn connect_timeout<T>(self, timeout: T) -> ClientBuilder
where
T: Into<Option<Duration>>,
{
let timeout = timeout.into();
if let Some(dur) = timeout {
self.with_inner(|inner| inner.connect_timeout(dur))
} else {
self
}
}
fn with_inner<F>(mut self, func: F) -> ClientBuilder
where
F: FnOnce(async_impl::ClientBuilder) -> async_impl::ClientBuilder,
{
self.inner = func(self.inner);
self
}
/// Only use HTTP/2.
///
/// # Example
///
/// ```
/// let client = reqwest::Client::builder()
/// .h2_prior_knowledge()
/// .build().unwrap();
/// ```
pub fn h2_prior_knowledge(self) -> ClientBuilder {
self.with_inner(|inner| inner.h2_prior_knowledge())
}
/// Send HTTP/1 header names in title case (e.g. `Content-Type`) instead of lowercase.
///
/// # Example
///
/// ```
/// let client = reqwest::Client::builder()
/// .http1_title_case_headers()
/// .build().unwrap();
/// ```
pub fn http1_title_case_headers(self) -> ClientBuilder {
self.with_inner(|inner| inner.http1_title_case_headers())
}
/// Bind to a local IP Address
///
/// # Example
///
/// ```
/// use std::net::IpAddr;
/// let local_addr = IpAddr::from([12, 4, 1, 8]);
/// let client = reqwest::Client::builder()
/// .local_address(local_addr)
/// .build().unwrap();
/// ```
pub fn local_address<T>(self, addr: T) -> ClientBuilder
where
T: Into<Option<IpAddr>>,
{
self.with_inner(move |inner| inner.local_address(addr))
}
/// Enable a persistent cookie store for the client.
///
/// Cookies received in responses will be preserved and included in
/// additional requests.
///
/// By default, no cookie store is used.
///
/// # Example
///
/// ```
/// let client = reqwest::Client::builder()
/// .cookie_store(true)
/// .build()
/// .unwrap();
/// ```
pub fn cookie_store(self, enable: bool) -> ClientBuilder {
self.with_inner(|inner| inner.cookie_store(enable))
}
/// Set a lookup ip strategy.
///
/// Default is `Ipv4thenIpv6`.
///
/// # Example
///
/// ```
/// use reqwest::LookupIpStrategy;
///
/// let client = reqwest::Client::builder()
/// .dns_strategy(LookupIpStrategy::Ipv4Only)
/// .build()
/// .unwrap();
/// ```
#[cfg(feature = "trust-dns")]
pub fn dns_strategy(self, strategy: LookupIpStrategy) -> ClientBuilder {
self.with_inner(|inner| inner.dns_strategy(strategy))
}
}
impl Client {
/// Constructs a new `Client`.
///
/// # Panic
///
/// This method panics if the TLS backend cannot be initialized, or the resolver
/// cannot load the system configuration.
///
/// Use `Client::builder()` if you wish to handle the failure as an `Error`
/// instead of panicking.
pub fn new() -> Client {
ClientBuilder::new()
.build()
.expect("Client::new()")
}
/// Creates a `ClientBuilder` to configure a `Client`.
///
/// This is the same as `ClientBuilder::new()`.
pub fn builder() -> ClientBuilder {
ClientBuilder::new()
}
/// Convenience method to make a `GET` request to a URL.
///
/// # Errors
///
/// This method fails whenever the supplied `Url` cannot be parsed.
pub fn get<U: IntoUrl>(&self, url: U) -> RequestBuilder {
self.request(Method::GET, url)
}
/// Convenience method to make a `POST` request to a URL.
///
/// # Errors
///
/// This method fails whenever the supplied `Url` cannot be parsed.
pub fn post<U: IntoUrl>(&self, url: U) -> RequestBuilder {
self.request(Method::POST, url)
}
/// Convenience method to make a `PUT` request to a URL.
///
/// # Errors
///
/// This method fails whenever the supplied `Url` cannot be parsed.
pub fn put<U: IntoUrl>(&self, url: U) -> RequestBuilder {
self.request(Method::PUT, url)
}
/// Convenience method to make a `PATCH` request to a URL.
///
/// # Errors
///
/// This method fails whenever the supplied `Url` cannot be parsed.
|
/// Convenience method to make a `DELETE` request to a URL.
///
/// # Errors
///
/// This method fails whenever the supplied `Url` cannot be parsed.
pub fn delete<U: IntoUrl>(&self, url: U) -> RequestBuilder {
self.request(Method::DELETE, url)
}
/// Convenience method to make a `HEAD` request to a URL.
///
/// # Errors
///
/// This method fails whenever the supplied `Url` cannot be parsed.
pub fn head<U: IntoUrl>(&self, url: U) -> RequestBuilder {
self.request(Method::HEAD, url)
}
/// Start building a `Request` with the `Method` and `Url`.
///
/// Returns a `RequestBuilder`, which will allow setting headers and
/// request body before sending.
///
/// # Errors
///
/// This method fails whenever the supplied `Url` cannot be parsed.
pub fn request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
let req = url
.into_url()
.map(move |url| Request::new(method, url));
RequestBuilder::new(self.clone(), req)
}
/// Executes a `Request`.
///
/// A `Request` can be built manually with `Request::new()` or obtained
/// from a RequestBuilder with `RequestBuilder::build()`.
///
/// You should prefer to use the `RequestBuilder` and
/// `RequestBuilder::send()`.
///
/// # Errors
///
/// This method fails if there was an error while sending request,
/// redirect loop was detected or redirect limit was exhausted.
pub fn execute(&self, request: Request) -> ::Result<Response> {
self.inner.execute_request(request)
}
}
impl fmt::Debug for Client {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Client")
//.field("gzip", &self.inner.gzip)
//.field("redirect_policy", &self.inner.redirect_policy)
//.field("referer", &self.inner.referer)
.finish()
}
}
impl fmt::Debug for ClientBuilder {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ClientBuilder")
.finish()
}
}
#[derive(Clone)]
struct ClientHandle {
timeout: Timeout,
inner: Arc<InnerClientHandle>
}
type ThreadSender = mpsc::UnboundedSender<(async_impl::Request, oneshot::Sender<::Result<async_impl::Response>>)>;
struct InnerClientHandle {
tx: Option<ThreadSender>,
thread: Option<thread::JoinHandle<()>>
}
impl Drop for InnerClientHandle {
fn drop(&mut self) {
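// Dropping the sender closes the channel, which ends the `for_each` loop
// on the runtime thread; joining then waits for that thread to shut down.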
self.tx.take();
self.thread.take().map(|h| h.join());
}
}
impl ClientHandle {
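// Runs a single-threaded tokio runtime on a dedicated background thread.
// Requests are shipped to it over an unbounded channel, and each response
// comes back through a per-request oneshot channel.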
fn new(builder: ClientBuilder) -> ::Result<ClientHandle> {
let timeout = builder.timeout;
let builder = builder.inner;
let (tx, rx) = mpsc::unbounded();
let (spawn_tx, spawn_rx) = oneshot::channel::<::Result<()>>();
let handle = try_!(thread::Builder::new().name("reqwest-internal-sync-runtime".into()).spawn(move || {
use tokio::runtime::current_thread::Runtime;
let built = (|| {
let rt = try_!(Runtime::new());
let client = builder.build()?;
Ok((rt, client))
})();
let (mut rt, client) = match built {
Ok((rt, c)) => {
if let Err(_) = spawn_tx.send(Ok(())) {
return;
}
(rt, c)
},
Err(e) => {
let _ = spawn_tx.send(Err(e));
return;
}
};
let work = rx.for_each(move |(req, tx)| {
let mut tx_opt: Option<oneshot::Sender<::Result<async_impl::Response>>> = Some(tx);
let mut res_fut = client.execute(req);
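// Poll the receiver's cancellation alongside the response future so an
// abandoned request is dropped instead of holding the connection.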
let task = future::poll_fn(move || {
let canceled = tx_opt
.as_mut()
.expect("polled after complete")
.poll_cancel()
.expect("poll_cancel cannot error")
.is_ready();
if canceled {
trace!("response receiver is canceled");
Ok(Async::Ready(()))
} else {
let result = match res_fut.poll() {
Ok(Async::NotReady) => return Ok(Async::NotReady),
Ok(Async::Ready(res)) => Ok(res),
Err(err) => Err(err),
};
let _ = tx_opt
.take()
.expect("polled after complete")
.send(result);
Ok(Async::Ready(()))
}
});
::tokio::spawn(task);
Ok(())
});
// work is Future<(), ()>, and our closure will never return Err
rt.block_on(work)
.expect("runtime unexpected error");
}));
// Wait for the runtime thread to start up...
match spawn_rx.wait() {
Ok(Ok(())) => (),
Ok(Err(err)) => return Err(err),
Err(_canceled) => event_loop_panicked(),
}
let inner_handle = Arc::new(InnerClientHandle {
tx: Some(tx),
thread: Some(handle)
});
Ok(ClientHandle {
timeout: timeout,
inner: inner_handle,
})
}
fn execute_request(&self, req: Request) -> ::Result<Response> {
let (tx, rx) = oneshot::channel();
let (req, body) = req.into_async();
let url = req.url().clone();
self.inner.tx
.as_ref()
.expect("core thread exited early")
.unbounded_send((req, tx))
.expect("core thread panicked");
let write = if let Some(body) = body {
Either::A(body.send())
//try_!(body.send(self.timeout.0), &url);
} else {
Either::B(future::ok(()))
};
let rx = rx.map_err(|_canceled| event_loop_panicked());
let fut = write.join(rx).map(|((), res)| res);
let res = match wait::timeout(fut, self.timeout.0) {
Ok(res) => res,
Err(wait::Waited::TimedOut) => return Err(::error::timedout(Some(url))),
Err(wait::Waited::Executor(err)) => {
return Err(::error::from(err).with_url(url))
},
Err(wait::Waited::Inner(err)) => {
return Err(err.with_url(url));
},
};
res.map(|res| {
Response::new(res, self.timeout.0, KeepCoreThreadAlive(Some(self.inner.clone())))
})
}
}
#[derive(Clone, Copy)]
struct Timeout(Option<Duration>);
impl Default for Timeout {
fn default() -> Timeout {
// default mentioned in ClientBuilder::timeout() doc comment
Timeout(Some(Duration::from_secs(30)))
}
}
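// Holding this keeps the background runtime thread alive for as long as a
// `Response` may still need it, e.g. while its body is being read.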
pub(crate) struct KeepCoreThreadAlive(Option<Arc<InnerClientHandle>>);
impl KeepCoreThreadAlive {
pub(crate) fn empty() -> KeepCoreThreadAlive {
KeepCoreThreadAlive(None)
}
}
#[cold]
#[inline(never)]
fn event_loop_panicked() -> ! {
// The only possible reason there would be a Canceled error
// is if the thread running the event loop panicked. We could return
// an Err here, like a BrokenPipe, but the Client is not
// recoverable. Additionally, the panic in the other thread
// is not normal, and should likely be propagated.
panic!("event loop thread panicked");
}
|
|
add-control-script.component.spec.ts
|
import { ComponentFixture, TestBed } from '@angular/core/testing';
import { AddControlScriptComponent } from './add-control-script.component';
describe('AddControlScriptComponent', () => {
let component: AddControlScriptComponent;
let fixture: ComponentFixture<AddControlScriptComponent>;
beforeEach(async () => {
await TestBed.configureTestingModule({
declarations: [ AddControlScriptComponent ]
})
.compileComponents();
});
beforeEach(() => {
fixture = TestBed.createComponent(AddControlScriptComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
|
});
|
AppList.story.js
|
/*
Copyright 2019 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React from 'react';
import { storiesOf } from '@storybook/react';
import { AppList } from './AppList';
import { StatusEnum } from 'app/services/releases';
storiesOf('Gravity/Dashboard', module)
.add('AppList', () => {
// create apps with different states
const apps = Object.getOwnPropertyNames(StatusEnum).map(name =>
makeApp(StatusEnum[name])
);
return (
<AppList apps={apps} pageSize="15"/>
);
});
const sample = {
chartName: 'slack',
chartVersion: '1.2.3.4',
name: 'slack',
namespace: 'default',
description: 'Collaboration tools and services',
endpoints: [{
name: 'Endpoint name',
description: 'Endpoint description',
addresses: ['https://localhohst/sample/endpoint/Lorem/Ipsum_is_si']
},
{
name: 'Endpoint name',
description: 'Endpoint description2',
addresses: ['https://localhohst/sample/endpoint', 'https://localhohst1']
}],
version: '3.1.8',
status: StatusEnum.UNKNOWN,
updated: new Date('2019-03-11T23:18:16Z'),
updatedText: '2019-03-11T23:18:16Z',
icon: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJQAAACoCAYAAAAGhMcCAAAABHNCSVQICAgIfAhkiAAAG1ZJREFUeJztnXmYFOWdx3/11tVdffd0z33hDAMMAXG4BgWRcEQhajhclSMmahIFFXOgq2Y35CJhsxIjoiaLugaEKJeiskZALoEBBQbDwMhwDHMPc/XdXd1Vb+0f4i6a6eljurq7hvo8Tz/wPG9Vvd+e+nbVW2+97/cFUFFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUVFRUUkZRKoFyAwFAOjK//FVn1SArvp8qUdIkRbZGGiG4hCrncDkFE0zDK+YoC0ZXkhb7DqCpJDg7PL5L51r95w5dpyvr9sleJ17AaBTZj02pGNv0eTbp3FDcyvYPGsWZdJxUkjEgtPrDdR3NHg/bzocanftwnzwMAD4ZNYjOwPFUBxttS+wz5q/xDr1rqHaQUNYgqJ73xJjCF5uEXsO7Ghq3/pf6wNN59cAxq0J1pPDFtiW2GaNXmioHJLP2I0kEL3/qSUBA9/Ywffsq6nt+bB6jeDwvgEKNpbiDYUo6oaMWQtW5923rJLJzAt74npDdLukjvf+erHltZXLBK97a0L06Ng5WfMn/cEyvWIQpWejFyMBhDpdYvvfDlQ5dlY/igV8IhF6kg2ZagH9gTKY7x309AsbcuY/NpQymFEsZgIAQCxL6L8x1mIcP/VO98kqveDoPAjxt2s0bKHt18W/uPsP5gnD7IilYhNDAJA6FhnGlBZqCu1zvSfrm3BQOBWnlpShWENRZtv3B6/c+LJ5wnQTgVDkHcJBEMDYsinzpJkTvCcPWoKdbTsh9oY7xZXmPDto+b1LNfk2pj/XfQIRoCmyc7rhBbe5qupaMR+qjv9oyUeRhkKsdkrJr159zTT6ZkOijknpDMgwZnKF48AOv+hxHYxlXzrL/LPiX977JJNlohKlh7abaE2x/RbXwdojkojrE3VcuVGioTLzHvjXLfZZC/JivcVFgjJakPa68sqej7YdkESxIZp9EENNLPrXOX/mSnO0CRUDAGyOVQMAlZ7P6t8CAG+ijy8HijMUN3jEr4uX/XEWYmJo8MaAJqeQDXa1DvPWVr8BkdtTGuu3Ktbbbh9TkmhzAwAAAaApyc5wf3KOEhzevye+gsTTj8ZHCkCoJHfRjxeROoN8T6cIQfbdS8ZQOsPMiJvq2Jn22ePHyGKmK5AcS2TdM3ERIFQiWyUJRFGGYjML5hkrp2fIXY+moJQ2jp/6YKTtDBXXPcjkWcN0eCUOw+jSDNpmnCd3PYlASYZC5ptmzKbkvDpdhW3Gv1QCQGYfm2Sap4ysTIYWxDGEsXLwbFDA+Up7gVdhNo3/ZlmyKtMUl+kpneHGcOVIy96oLbDpk6VHf8N1ZQBgTlZ98aIYQyGGLWey8rlk1UebMynaar8+bLmFux6ZdAnrJogEYzdyiKHKk1VfvCjHUBptIakzJu2pFHEcgYyW7HDlpEGbTWrppL26QpyGRCxdmKz64kUxhiIQpSEQmbQTSJAUIIphwm5AkgxBJu/Ph0hEAIkS1pErF4oxlCQKfiyEpFTrSBUEIggCEWnfb6gYQ2He34j9HjHVOlT6RjmGCvKng62Nih0ndK2gGEMBQLfjyO7PUy1CpW+UZChwHt65RfS6r9l2lBJQlKH4y03bHIf+Lvc4cJV+oChDAcbnW9etel10O9SrVJqiLEMBgO/86d+1vfXSGZBUT6UjijMUAHS3rn9usePQBz2pFqLyzyjRUICD/L4Lv1my1HVsv0e9UqUXijQUAIDg6l537qlF3+/68K12SRxwE3AVi2INBQAgeJ2bz69Ycmv9ysf2Bdua1F70NEDRhgIAAEGo7nh/w8xTD9z8QOPzzxzznKkOiD5Pou6Dip8Im2ySNp5HZnyCo/v11jfXrGvf9kolk1t8O1dSXqEtGpxPW2x6gtbG9T2DbY01iRY60BkohvoSjIOBQ4H62kOB+loEAHoA0ACgOK802J1IcdcCA81QV4MBwPXFJ1UJPtceym9DqaQVqqFUEopqKJWEohpKJaEkqlGOAMCMGLYcabSFBMUagEij3ARRDEgh3iHw/rMgCOcAIJBqSSnkyrmiyhFLFwCBtCDhAOZDDTgonAYAB/TjKaZ/hkKohM0smGee9K05prFTBjNZ+RzS6kmCZgmCiPdRXQawIOFgAIsuh+C7UNvTs3/7Ec/JIxsEr/ND+OJJcOCD0CDabpxrHFMyVz+6dAiTaeQQpyERiQgJi5Lo5cVgh8vnPX6xznnk7NZQp2szYHw+1mriNBTK4gYPfzJ30Y8XmSbMyCA5ffqYJzwkANC64WO09m8vmM23Ndze+e662vata1cIbscWAAimWqBMWDXFmU9l3n3TfYaxg20kFza1htJel82axpeNy/7eLWPdn57/WfvfPl4XuNi+EgDao60s5tsSYrVT8h54cnPxsj/O0g0ZpUM0owQzfRWCAMpgRsaKSZmWSbPu8DfWlfEt9Qcghgwm2m660zpj1A0yqvwK2B+Uuj84/j/YH/wk2n0QQ0223z1pS/6js+7gynJ1iI4uppGgKUJTZOfMU75RSbL0Hb7apppoQ89iapRTZut9pSvWbctd9JMhskbqJAuCAE1xGT14xfp7cu5eskMpkTnRQBm1iwqemrste8GkclKvietckRxLZN4zcUjRz+/aRhm190WzT9SGoozm+YN/t/FFc+U0E/Qn0zINITk9kb94+ejc+Y9sAYTyUq2nvyAdO6/wqXkvmsaXWfqdXUUQYBhTair+97tfpAza+RHrjkogRd1Q/LNVfzKMHJ+0sIpkQ1A05H7/6estk2b9GQA0qdYTNxQalbf4thf01xcnNBmGKy/g8h6Z+SdEoT5v89EYisuYtWC1ZcqdtgRpS1uQRgNFS393K2vPfSjVWuJEY50y8jnzzcOz5Di48aZhNvP0UasBIOyFJaKhaKt9Qd53l1USKH26leSEyconc77/xE+VeOujjNq7M+dPmihXiAeBCMi8e2IlZdYtCLdNpJo19lnzlzDZ+deGm66Q8c05eWxmwcJU64gRZJ468lEmyyzruWIyTaRlxqglEKZZ0KehEKu9yTr1rqGyKEtjSIORyJgxdz4o6NUUYqhK85QRw5NRl2Xy8KGIZW7qVUdfOzI5RdO0g4aw8shKb0wTvlUKiFLMj4nONt+uybMm5VyxBXaWzjJO662sT0MZhldMCLuq0wCHyS5gabOl119hOqIZlFWBtPJkt38dgkKgG5I/obeyvgxFaUuGp30En1zQRgtJmu2DU60jSoxsni0/mRVqiu2F0Muru74MhWiLXSefpPQGMSyQnN6Sah1RoqHN2qQlEgMAUCadDnrxT5+3PIKkFNMoTTgEAYjos5tZgiROWpawJEmCFG7YDUEwVFLnBxB0730TfRkGC86uazYxThKCIPJ+T7hyzAseSUje3FIpKGAsCo6kVRgBwen1QS/jpvo0lP/SuaiHLQw0RI8Liz2d9WHLXd560csnbTqN4PEJ2B
86m6z6IsE3d7dDrIbynDl2HPC1OQWJ72gNhZzdR8OVi07f0VCXK5QsPYFLHT2A8blk1dcnkgS+2pbjEKOhgK+v2xW83HJNZgZ4Tla14WDgeLhyHBSO+2oa2pIiRgJwHTx7BNJk6HKwwyUGmjp29VbWp6EEr3Nvz4EdTfLISl+kEA9dOzZsh75PYKBn12fbpZD8vze+3SF4Tl/aIHtFUeKu+rwJe/m9vZVFeorrbN/6X+tFt+uaCmFyHjvg9Fw49Wqk7Xz17a96Tl50yq3H8WF1LfbyH8hdTzQIHl7qfP/YegDoNes0YrdAoOH86o73/nox4crSFNHrllrW/nY9CELkxaMFXN2+ft960cfL9oMLNHSEut7/dAUAhH3iTCY9O49f5Bs7V4crj6KfCbe3vLZymedMdVrcv2UFY2jb8Pxpz5kTy6PdxXe2ZXnnlqrTciTpYT8vtb78982C278p4QePA9+51kD7hgPLoI9JC1F1XApe99YLv3noOX6AN9C7dm/taN3wpwchzOU8DJ0dmw496Nhf05FILZKAoW39vuPu6ouPQ+S1j2Un1OESG//z7eewl9/a13ZRj50RHJ0HvScPWgxjJldQRsvA6kHHGLp2bem4uHLp9zAf2BPr7hLGTe7jF2qYbPMMTZFd199x3GJQgPZ1e052vn30LpCkaB6K9MbKsh9pS3JkWa2Kb3MIDb/d9HLg4uVnIIK5YxmMJQQ723Y6Duzwc6XllUx2AUsQyveV6HVLretWnWl88d/ujcdMXyIJYp37yLkDEsY3aktzbNFOWfo6oQ6X2Lz6vR3dO6vnA5YuRbmbLIaSsATe6nr3pRWbl/ONncshirmLsY7uw6LHdbB797YDoa7WYZrCshzKYEZyrgouF1KIB+cne531Kxa/0rVzy/ckUex3L7SEcZP3H5fe8lZf1NE24xDGbtJEOxxX8PCSY/fJ5sZV23/pq21+CiTojqHqxBpKAgi2dgvtf913pO21Xd8TXb6NEOX09P44gaN0hluN46c+aJvxL5Wa4jI9bc6kEMcRBJlmOWaS9MW7OY8L8x2tIc/JqrauHRu2ey6cejWqp7l4oNAorjjrfsu0kXdwwwuz6QwjTepYRFAkAEiAgwKILr8Y6nDy7k/Pne/+6NQboU7HBsDQGEdtWfmPf/u4dcYNufFIlUQMoj8kYadX8Dd2ehx7PqtyH7+w9kpXRUzvcxN1acmkdIYbaav9emS0ZPe5EmYqwFgSeb9H7OmsDzm7j17pAU/WU6sGMVQFaeLGkUZdMWIpPUggYT/fIzi8dYLLfxAwPgv9a3hztN34MJNlGi7hOMZAiGJQdPvbQj2+k9jPHwKAy/3QoqKioqKioqKioqKiojJgSFi3gQHRN2aQ2uvNiMmmJCLmbgMCAdEc8ta0iL6XIMa+j69BUYDKzBR9UwahGaxDjAVJQPgl0dOFA/XdYuBoEHBSuw00DKqwGshxNjNZrGWQHoj0WkNGECDY4xHbOnpCJ91+3K9ug351bBoQfevN2twHv6MtrixhjHo70lBaRBFUnId903O+5anuIxU4hgi+L0EABXkkN/923aAF39TmleSRHGsmGZIlSCCAgJCEwS0FcZvoC33Cd7RtcV/cfibkeFUALEvHJkXBqPIi7v57p1nuGD+My86x0rRJRyKaItLuxYIgSuANiFKXAwtnm/yet/Y5qvYcc691+3FyOjZZhCbO40r+4wfGoWOLKEN8L616IU5Dcdkk98PHjMN/eruuOM+A6KjkBCUMhwJtzlWuz9Z/xncvh9hGGPSFbVQpt/zphVkLJ47Qm5jo5KQVkgRwoTUovPR2xycbP+p5gg/ij6PdN9ZvS+WT3E9WZlT+vFKTZSATfOWO1VAIIG+6tuDlX1hH35ZLcnGljrjFoLTWU3vmJefpB4KAq+I5xpcwFKpcOtf+yuLZtmEGjlSek76GiCU4+A+ve+kLjb9pbA+tgih682M5CZoRtPXZVzMnPzmCydAiGZoBNcEe925/85+lKMJTEUDJD4zl7yy3jrnZiti4hz2wiCTGazLtxZThzr18a40g4bp4jqNj0cznH8t/84Fv24o1TBpFavcDRBBQlM2wt44zTjl6xm9r6w7tgQimivpEDKZNv/iL/eaHCylDOrz5zbzfMOzNZaaR12uJ/schISDgDl2xfaVl3H8zgCpj3Z+hUOUfH8n/7zmTzfaBYaWvUpzNUq8/U/TwkEL2F5G2jcpQBkTPWWWb8HgOFd9tJcFQN2myn/uxeUQFleDxWN/WFdkfMpevBYBY4h9tj82zr/3OzWZ7QsWkGXkZNLnm8YLHDVo0p6/tIp4RBJD1mPEbfxhBW9MiyNSI6LuWW8bM0xEJexb4PxAQ8APd0PKRrHV5tPuMKuWWL/mOrXwgXpm+zqhSTrPs3qw/IICwGZ4RDXUdbXr0Hn3poMRKixv9fcYhTw+mjbKFVhlIhviJceRCCtCoSNtSFIx6emHWwoHQAI+WBdMtg0oK2EfDlUcylG2hoXRhtI/icmNA9K13cYNkT5W7UZNtGkab74+0XXkRd//EEXqT3HrSCZOOIu6faVsIYZoFfSfYIfqWadr8pAZZ9cU4NnN+QRIeChgCwVzDoDug77xyzb3TLHcosZ+pv9w21pBv0qFbeivr01CltGlavP07MqCZoc0fn6zTN5a1ZzOAKsKKYVDF+GFcdpLkpBW5doYszdfEnLGJRjEZFXL0N8UDBah0CG1KWqJcNsnRVlIzLly51UCOy7HS12QAKSIARpdxFRBjgh26jjLIksgfD1pElhlJNml9YAaCQRlIUxyu3GYmi006mRLmFUBJLpsFsRrKSmnSZm0XGpA56jlJiaiPQKAlyLC5lVoG6eOcejcgsJooDmLN2BRwctPGgpIoYOh91gaJkIaUkveenvjKP/+MBHKkGSgHLEq9eqPPBLsOMRD1goSJoAvzHkiTUK1IuLy4h09eImLa0eEQvBBjgp1QKzga5JP0z5wTXE2gkDWAu1xCXbdXHNDhIX1xqj7QAL28KO7zlneS7zos9H5lSzheSZA+DzrCRhCmGz0u4WDT5RCfah2pIChKcKLOe7i3sj4N1SR6d9WFXEn5o10QXPylkPvdZNSVCAQMtR9+4k6PENUkU9fA8w3todgzNv1YOPiO71KtPLK+yjueSzX9HeCWZPDW/d0bnF7hmmubb9nfU+vn8cHeyiI9hgc2+86taRK9srYVmkSvuNV7YTVEmfCRLjR3hta/fcDVnGodyaTpclDcuLtnDYR5eIrYr9Mp8G+scdZUiTKtQyGABM85//FxN+bflKUCGcEYmp99s/3Z5o4kRAGnASKW4NlNl6s6HMIb4baJpqPQt8lz4dEP/I2JGsT/Fd711re/7al/HBTSXfB1WrpCL//81ZYPAsGB76nth1ydG3c7HoU+ZsJEl7EJ+MQzXZ8s/ZTvTOjaL4cD7Z5/7/n0EbmmMiWJwPuHnT9aueHyyaA4cJtTR874fMteal4qCPhEX9tF/SrDgfkNP+zYt3ivv8UZTwTR1WCQYJevuefhzo8Xu3Foc78OlgZgD
M1r3u6Y+6vXWo95AnhAuQpLAB8ddzsX/bZ+scMtRAzfj+ndWDfmX/9R18ezX3TWfO4Wg3H94Zw4KD3vPHV6SffB2Q7Mr4vnGOkIxnD+5e2dM7+74tLf6hr50EB4L+P2idIfN13+fNGKS7O7XcLr0ewT81gnUcL1VXz7pr2BVspMsYPzKJ2WISKPqHaLQenv/sbOJ7urXnrP13C/KOGYuiN0iB47X1c6U5/E0aNveS6caBF922PYxXupLfjulgPOz31+PKQoi7EadSRKt5nCkfAEsPR+lavrsecb//L2fseDgiidinbffn1VBFCSR3LzvskVzJnEZg/OpTjOiGgSEYgQJFHyYEFsFny+/YGWz/f4m7e0iL4tGCCuVRnslHbxe5m3rs6itEkbcTCvbeernwY7Hohzd6NJh2aML9fPn1VpHD+0UGOx6CmKZQhEkuljMVGSpFAISx4fFps6g7591d66D446tzZ3hjZjDOdjPV6ivhgCADOLULkW6EIEoBEA+3kQG3mMTwOAA/rZx6RAQ12NhkJQqmVRGUsjM0kRaTGDCOCL+NGQCG4/jxv4YP/PVaIGrGEA6OYx/piHa/L1ViQCAoZTbj8+5fYrqu82Zq7ZEYcq8qAaSiWhqIZSSSiqoVQSSjokqSgVDgHEtbbKlXHzAfhiUcUB1UpXDRUn+Xb64WX3ZP0knn0DQUnocoY8dc180+lLgeOX2kLvBoK4CgaAuVRDxUmBnRm+YLo1rsV6rqLcE8DTzzUFHt+811Hz1l7H6m6XsAn6F1qbUtQ2VJzgBM2i0msQMaqU0/zmwdzRH60qfeWeqdYdFAURk1/SFdVQaUR+JkM+90je5BceK/jApEPzUq0nHlRDpRkUScDcyZas158qfs1qoBalWk+sqIZKQwgCYOJIvf75pXl/Yhk0OdV6YkE1VBrzrbEmy9I5thcBwJpqLdGiGiqNIQiAH95pH1ZerHkq1VqiRTVUmmPWkcTjczPvQwhKUq0lGlRDKYDpYw223Ax6dqp1RINqKAVg4Ehi+ljj3FTriAbVUAph6g36IaCAxrlqKIWQb2c4lkHlqdYRCdVQCkHPIVLLooJU64iEkgwlJur9mRKhSURQBGhTrSMSijFUCGN3CJI311sACQRCCoYtFyAoJHHquYglSZDSP/9BMYbyQ6jBg4WkJVL4sSA5cLAtXHmPR2zzBpLnKJdXFP08TmpEZTwoxlA8xqdbBF/Sxgl14IDQJfpPhi3vCZ3scuCIK1wmiqbOoO/KvLm0RjGGAgDHfr71bLIqOx90edw4dChcuduPD51t8nuSpWdPtecsfDEJM61RkqHwHl/TNg8OJeU287a/vgr6Xm7+8lv7HEmJcPT4sPThUdc2UMAQYSUZCppF3+Z9fGuX3PVcENyh/f6WtZG223PMvfZCczAkt56dx91dzZ3KiD1SlKEwwPk1jpp18UYJRVeHBK+4aj9149COSNu6/XjHi+90fCpnIpTbJ0rPbWpfF09wRSpQlKEAAE6Hen7/uufsWbnO4aFAu3uz7/wTEF1EY2DjRz1PHPjM45ZDiyQB/OW9rrM1FwO/l+P4cpAua+HFgvdYqKtmBG35TjFtSGiKSYPgFh7uOLC8Bwc3RruPKEoNh097xFvHGaeY9VRCf6B7TridT7zcskgUpZpEHldOlGgoECVcvz/Q2j6azZyeS3EJWbOuVfCJD3d8/PI5wbUcYmz8urz4yNEzftvUMYbRRi4xK2YdOePz/eA/Gpa6feK2RBwvWSjSUAAAfkms/sDfcLGQNky+jjZy8S4UKQHAqVB34KHOA6vOhBzPAEDY3vE+wG3doT17TrjpMUO4MVkWmoo3UkwQJXjnkKvzoVWND3e7BcVFRirWUAAAvIRPfehr2tUu+r9Rzljy9IiOabV7Nw5Jf3WfvfhE95GHWkXfC9DLYjgxIHQ5xd3b9jvOMDQxtqyANWuY6OVIEkBLZ0j85euth3+/4fJ8X0Dc2Q8tKSNtovn6CWej2AXzuNIld3JFQwfTRpYier/zYJCgRfSJu/xNTevd59ZfCDnXYIDWRIpBADklBeyS+2faFt421pCfa2fIcNYKihLUNfD8lv09tRt396y5Eiqv2JnDA8VQX8JpETUhn9RNu57NmDCUMhfaSY2OQgh1CwHfBcHdXh3sOn4u5NzlxqG9ACBLmP9V2Ew6dEtpvmba6DKuoiSXzbKaKA6LEu5wCN5T9YGGE3Xeww3toV1+Hh8GBRvpSwaaob4OBf/fNYKv+qQCdNXnSz1JexeooqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKicg3wv15sZ/dPegJIAAAAAElFTkSuQmCC',
}
function makeApp(status) {
return {
...sample,
status
}
}
|
|