Columns (string lengths, min–max):
file_name: 3–137
prefix: 0–918k
suffix: 0–962k
middle: 0–812k
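Each row below is one source file split into a fill-in-the-middle (FIM) triple: the file name, then the prefix, the suffix, and finally the held-out middle. Concatenating prefix + middle + suffix recovers the original file (in the first row, the prefix ends with `class`, the middle is `Animal_Index`, and the suffix begins `extends AbstractIndexCreationTask`). A minimal sketch of working with such rows — the dict layout mirrors the columns above, while the `<fim_*>` sentinel strings are hypothetical placeholders, not tokens defined by this dataset:

```python
def reassemble(row: dict) -> str:
    """Recover the original source file from a FIM row."""
    return row["prefix"] + row["middle"] + row["suffix"]


def to_fim_example(row: dict) -> str:
    """Order a row as prefix/suffix/middle for FIM-style training.

    The <fim_*> sentinels are illustrative placeholders; substitute
    whatever special tokens your tokenizer actually defines.
    """
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>" + row["middle"]
    )


# Tiny worked example mirroring the first row below.
row = {"prefix": "class ", "middle": "Animal_Index", "suffix": " extends ..."}
assert reassemble(row) == "class Animal_Index extends ..."
```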
RavenDB_5669.ts
import * as assert from "assert"; import { testContext, disposeTestDocumentStore } from "../../Utils/TestUtil"; import { IDocumentStore, AbstractIndexCreationTask, } from "../../../src"; describe("Issue RavenDB-5669", function () { let store: IDocumentStore; let index; beforeEach(async function () { index = new Animal_Index(); store = await testContext.getDocumentStore(); await store.executeIndex(index); await storeAnimals(store); }); afterEach(async () => await disposeTestDocumentStore(store)); it("working with different search term order", async () => { const session = store.openSession(); const query = session.advanced.documentQuery({ documentType: Animal, indexName: index.getIndexName() }); query.openSubclause() .whereEquals("type", "Cat") .orElse() .search("name", "Peter*") .andAlso() .search("name", "Pan*") .closeSubclause(); const results = await query.all(); assert.strictEqual(results.length, 1); }); it("working with subclause", async () => { const session = store.openSession(); const query = session.advanced.documentQuery({ documentType: Animal, indexName: index.getIndexName() }); query.openSubclause() .whereEquals("type", "Cat") .orElse() .openSubclause() .search("name", "Peter*") .andAlso() .search("name", "Pan*") .closeSubclause() .closeSubclause(); const results = await query.all(); assert.strictEqual(results.length, 1); }); }); async function storeAnimals(store: IDocumentStore) { { const session = store.openSession(); const animal1 = new Animal(); animal1.name = "Peter Pan"; animal1.type = "Dog"; const animal2 = new Animal(); animal2.name = "Peter Poo"; animal2.type = "Dog"; const animal3 = new Animal(); animal3.name = "Peter Foo"; animal3.type = "Dog"; await session.store(animal1); await session.store(animal2); await session.store(animal3); await session.saveChanges(); } await testContext.waitForIndexing(store, store.database); } class Animal { public type: string; public name: string; } // tslint:disable-next-line:class-name class
extends AbstractIndexCreationTask { public constructor() { super(); this.map = "from animal in docs.Animals select new { name = animal.name, type = animal.type }"; this.analyze("name", "StandardAnalyzer"); this.index("name", "Search"); } }
Animal_Index
mod.rs
#[doc = r" Register block"] #[repr(C)] pub struct RegisterBlock { #[doc = "0x00 - Control Register"] pub ctrl: CTRL, #[doc = "0x04 - Command Register"] pub cmd: CMD, #[doc = "0x08 - Synchronization Busy Register"] pub syncbusy: SYNCBUSY, } #[doc = "Control Register"] pub struct CTRL { register: ::vcell::VolatileCell<u32>, } #[doc = "Control Register"] pub mod ctrl; #[doc = "Command Register"] pub struct CMD { register: ::vcell::VolatileCell<u32>, }
pub mod cmd; #[doc = "Synchronization Busy Register"] pub struct SYNCBUSY { register: ::vcell::VolatileCell<u32>, } #[doc = "Synchronization Busy Register"] pub mod syncbusy;
#[doc = "Command Register"]
run_tests.py
#!/usr/bin/env -S python3 -u import argparse import os import random import shutil import signal import subprocess import sys N_SIMPLE = 10 DEFAULT_TIMEOUT = 40 REPEAT = 5 def setup_terminal():
def run_test(seed, board, timeout): print("Testing seed %u..." % seed) try: launch = subprocess.Popen( ['./launch', '--board', board, '-t', '--timeout=%d' % timeout, 'test=all', 'seed=%u' % seed, 'repeat=%d' % REPEAT]) rc = launch.wait() if rc: print("Run `launch -d test=all seed=%u repeat=%u` to reproduce " "the failure." % (seed, REPEAT)) sys.exit(rc) except KeyboardInterrupt: launch.send_signal(signal.SIGINT) sys.exit(launch.wait()) if __name__ == '__main__': setup_terminal() parser = argparse.ArgumentParser( description='Automatically performs kernel tests.') parser.add_argument('--times', type=int, default=N_SIMPLE, help='Run tests given number of times.') parser.add_argument('--board', default='malta', choices=['malta', 'rpi3'], help='Emulated board.') parser.add_argument('-T', '--timeout', type=int, default=DEFAULT_TIMEOUT, help='Test-run will fail after n seconds.') args = parser.parse_args() # Run tests using n random seeds for _ in range(0, args.times): run_test(random.randint(0, 2**32), args.board, args.timeout) print("Tests successful!") sys.exit(0)
cols, rows = shutil.get_terminal_size(fallback=(132, 43)) os.environ['COLUMNS'] = str(cols) os.environ['LINES'] = str(rows) if sys.stdin.isatty(): subprocess.run(['stty', 'cols', str(cols), 'rows', str(rows)])
horiba-pentra-60-parser.js
let { HoribaPentra60Parser } = require('./../horiba-pentra-60-parser'); let parser = new HoribaPentra60Parser(); let results = parser.parse(` 1H|\^&|||ABX|||||||P|E1394-97|20060210061533 2P|1 3O|1|17033680|761|^^^DIF|||||||||||||||||||||F 4R|1|^^^WBC^804-5|10.1|10)/mm)||H||W 5C|1|I|Alarm_WBC^LMNE+|I 6C|2|I|EOSINOPHILIA|I 7R|2|^^^LYM#^731-0|3.51|||||W 0R|3|^^^LYM%^736-9|34.7|||||W 1R|4|^^^MON#^742-7|0.22|||||W 2R|5|^^^MON%^744-3|2.2|||||W 3R|6|^^^NEU#^751-8|5.43|||||W 4R|7|^^^NEU%^770-8|53.7|||||W
7R|10|^^^BAS#^704-7|0.00|||||W 0R|11|^^^BAS%^706-2|0.0|||||W 1R|12|^^^RBC^789-9|5.14|102/mm)||||F 2C|1|I|HYPOCHROMIA|I 3R|13|^^^HGB^717-9|13.8|g/dl||||F 4R|14|^^^HCT^4544-3|44.0|%||||F 5R|15|^^^MCV^787-2|86|5m)||||F 6R|16|^^^MCH^785-6|26.8|pg||L||F 7R|17|^^^MCHC^786-4|31.4|g/dl||LL||F 0R|18|^^^RDW^788-0|11.5|%||||F 1R|19|^^^PLT^777-3|355|10)/mm)||||F 2R|20|^^^MPV^776-5|8.3|5m)||||F 3L|1|N `); console.log(results);
5R|8|^^^EOS#^711-2|0.95|||HH||W 6R|9|^^^EOS%^713-8|9.4|||||W
index.js
"use strict"; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); exports.createIPXHandler = void 0; const path_1 = require("path"); const os_1 = require("os"); const ipx_1 = require("ipx"); const functions_1 = require("@netlify/functions"); const ufo_1 = require("ufo"); const etag_1 = __importDefault(require("etag")); const http_1 = require("./http"); function
({ cacheDir = (0, path_1.join)((0, os_1.tmpdir)(), 'ipx-cache'), basePath = '/_ipx', ...opts } = {}) { const ipx = (0, ipx_1.createIPX)({ ...opts, dir: (0, path_1.join)(cacheDir, 'cache') }); const handler = async (event, _context) => { const host = event.headers.host; const protocol = event.headers['x-forwarded-proto'] || 'http'; let domains = opts.domains || []; const requestEtag = event.headers['if-none-match']; const url = event.path.replace(basePath, ''); const [modifiers = '_', ...segments] = url.substr(1).split('/'); let id = decodeURIComponent(segments.join('/')); const requestHeaders = {}; const isLocal = !id.startsWith('http'); if (isLocal) { id = `${protocol}://${host}${id.startsWith('/') ? '' : '/'}${id}`; if (event.headers.cookie) { requestHeaders.cookie = event.headers.cookie; } if (event.headers.authorization) { requestHeaders.authorization = event.headers.authorization; } } else { if (typeof domains === 'string') { domains = domains.split(',').map(s => s.trim()); } const hosts = domains.map(domain => (0, ufo_1.parseURL)(domain, 'https://').host); // Parse id as URL const parsedUrl = (0, ufo_1.parseURL)(id, 'https://'); // Check host if (!parsedUrl.host) { return { statusCode: 403, body: 'Hostname is missing: ' + id }; } if (!hosts.find(host => parsedUrl.host === host)) { return { statusCode: 403, body: 'Hostname is not allowed: ' + parsedUrl.host }; } } const { response, cacheKey, responseEtag } = await (0, http_1.loadSourceImage)({ cacheDir, url: id, requestEtag, modifiers, isLocal, requestHeaders }); if (response) { return response; } const res = await (0, ipx_1.handleRequest)({ url: `/${modifiers}/${cacheKey}`, headers: event.headers }, ipx); const body = typeof res.body === 'string' ? res.body : res.body.toString('base64'); res.headers.etag = responseEtag || (0, etag_1.default)(body); delete res.headers['Last-Modified']; if (requestEtag && requestEtag === res.headers.etag) { return { statusCode: 304, message: 'Not Modified' }; } return { statusCode: res.statusCode, message: res.statusMessage, headers: res.headers, isBase64Encoded: typeof res.body !== 'string', body }; }; return (0, functions_1.builder)(handler); } exports.createIPXHandler = createIPXHandler;
createIPXHandler
custom.d.ts
declare module '*.gif'; declare module '*.svg';
// For TypeScript to recognize these asset imports declare module '*.jpg'; declare module '*.png'; declare module '*.jpeg';
api-routes.js
'use strict'; const express = require('express'); // import getAllMoviesHandler const getAllMoviesHandler = require('../API/API.controller'); // import Interface to interact with the DB const Interface = require('../models/Interface'); // import user model const userModel = require('../models/usersModel'); // create new instance of Interface class const interfaceDB = new Interface(userModel); // access control list middleware const permissions = require('../middlewares/acl'); // bearer auth middleware const bearerAuth = require('../middlewares/bearerAuth'); // use express Router const router = express.Router(); // user routes router.get('/movies', getAllMoviesHandler); router.get('/favourites', bearerAuth, permissions('read'), handleGetFav); router.post('/favourite', bearerAuth, permissions('create'), handleAddToFav); router.delete('/favourite/:id', bearerAuth, permissions('delete'), handleDeleteFromFav); // get all favourites handler async function
(req, res, next) { try { let id = req.user._id; let allRecords = await interfaceDB.get(id); res.status(200).send(allRecords); } catch (error) { next('get favourites error', error.message); } } // add favourite handler async function handleAddToFav(req, res, next) { let id = req.user._id; let obj = req.body; try { let userData = await interfaceDB.get(id); userData.favoriteMovies.push(obj); await userData.save(); res.status(201).send(userData); } catch (error) { next('add favourite error', error.message); } } // delete favourite handler async function handleDeleteFromFav(req, res, next) { const userId = req.user._id; const { id } = req.params; try { let userData = await interfaceDB.get(userId); const updatedFavourite = userData.favoriteMovies.filter((item) => item._id != id); userData.favoriteMovies = updatedFavourite; await userData.save(); res.status(200).send(userData); } catch (error) { next('delete favourite error', error.message); } } module.exports = router;
handleGetFav
cmd_test.go
/* Copyright 2018 Turbine Labs, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1 import ( "testing" "github.com/turbinelabs/rotor" "github.com/turbinelabs/test/assert" ) func TestFileCmd(t *testing.T) { mockUpdaterFromFlags := rotor.NewMockUpdaterFromFlags(nil) cmd := FileCmd(mockUpdaterFromFlags) cmd.Flags.Parse([]string{}) runner := cmd.Runner.(*fileRunner) assert.Equal(t, runner.updaterFlags, mockUpdaterFromFlags) assert.NonNil(t, runner.codecFlags) } func TestRESTCmd(t *testing.T)
{ mockUpdaterFromFlags := rotor.NewMockUpdaterFromFlags(nil) cmd := RESTCmd(mockUpdaterFromFlags) cmd.Flags.Parse([]string{}) runner := cmd.Runner.(*restRunner) assert.Equal(t, runner.updaterFlags, mockUpdaterFromFlags) assert.NonNil(t, runner.clustersNodes) }
testes-admin.client.config.js
(function () { 'use strict'; // Configuring the Testes Admin module angular .module('testes.admin') .run(menuConfig); menuConfig.$inject = ['menuService']; function me
enus) { Menus.addSubMenuItem('topbar', 'admin', { title: 'Gerenciar Testes', state: 'admin.testes.list' }); } }());
nuConfig(M
frame-pointer.rs
// compile-flags: --crate-type=rlib // revisions: aarch64-apple aarch64-linux force x64-apple x64-linux // [aarch64-apple] needs-llvm-components: aarch64 // [aarch64-apple] compile-flags: --target=aarch64-apple-darwin // [aarch64-linux] needs-llvm-components: aarch64 // [aarch64-linux] compile-flags: --target=aarch64-unknown-linux-gnu // [force] needs-llvm-components: x86 // [force] compile-flags: --target=x86_64-unknown-linux-gnu -Cforce-frame-pointers=yes // [x64-apple] needs-llvm-components: x86 // [x64-apple] compile-flags: --target=x86_64-apple-darwin // [x64-linux] needs-llvm-components: x86 // [x64-linux] compile-flags: --target=x86_64-unknown-linux-gnu #![feature(no_core, lang_items)] #![no_core] #[lang="sized"] trait Sized { } #[lang="copy"] trait Copy { } impl Copy for u32 {} // CHECK: define i32 @peach{{.*}}[[PEACH_ATTRS:\#[0-9]+]] { #[no_mangle] pub fn
(x: u32) -> u32 { x } // CHECK: attributes [[PEACH_ATTRS]] = { // x64-linux-NOT: {{.*}}"frame-pointer"{{.*}} // aarch64-linux-NOT: {{.*}}"frame-pointer"{{.*}} // x64-apple-SAME: {{.*}}"frame-pointer"="all" // force-SAME: {{.*}}"frame-pointer"="all" // aarch64-apple-SAME: {{.*}}"frame-pointer"="non-leaf" // CHECK-SAME: }
peach
shapes.rs
#![no_std] #![no_main] extern crate cortex_m_rt as rt; extern crate nrf52832_hal; extern crate panic_halt; use cortex_m_rt::entry; use cortex_m_semihosting::hprintln; use display_interface_spi::SPIInterfaceNoCS; use embedded_graphics::pixelcolor::Rgb565; use embedded_graphics::prelude::*; use embedded_graphics::primitives::*; use nrf52832_hal::gpio::p0::Parts; use nrf52832_hal::gpio::Level; use nrf52832_hal::spim; use nrf52832_hal::Delay; use st7789::{Orientation, ST7789}; #[entry] fn
() -> ! { let core = nrf52832_hal::pac::CorePeripherals::take().unwrap(); let mut delay = Delay::new(core.SYST); let p = nrf52832_hal::pac::Peripherals::take().unwrap(); let port0 = Parts::new(p.P0); let _backlight = port0.p0_22.into_push_pull_output(Level::Low); // set medium backlight on let rst = port0.p0_26.into_push_pull_output(Level::Low); // reset pin let _cs = port0.p0_25.into_push_pull_output(Level::Low); // keep low while driving display let dc = port0.p0_18.into_push_pull_output(Level::Low); // data/clock switch let spiclk = port0.p0_02.into_push_pull_output(Level::Low).degrade(); // SPI clock to LCD let spimosi = port0.p0_03.into_push_pull_output(Level::Low).degrade(); // SPI MOSI to LCD let pins = spim::Pins { sck: spiclk, miso: None, mosi: Some(spimosi), }; // create SPI interface let spi = spim::Spim::new(p.SPIM0, pins, spim::Frequency::M8, spim::MODE_3, 122); // display interface abstraction from SPI and DC let di = SPIInterfaceNoCS::new(spi, dc); // create driver let mut display = ST7789::new(di, rst, 240, 240); // initialize display.init(&mut delay).unwrap(); // set default orientation display.set_orientation(Orientation::Landscape).unwrap(); let circle1 = Circle::new(Point::new(128, 64), 64).into_styled(PrimitiveStyle::with_fill(Rgb565::RED)); let circle2 = Circle::new(Point::new(64, 64), 64) .into_styled(PrimitiveStyle::with_stroke(Rgb565::GREEN, 1)); let blue_with_red_outline = PrimitiveStyleBuilder::new() .fill_color(Rgb565::BLUE) .stroke_color(Rgb565::RED) .stroke_width(1) // > 1 is not currently supported in embedded-graphics on triangles .build(); let triangle = Triangle::new( Point::new(40, 120), Point::new(40, 220), Point::new(140, 120), ) .into_styled(blue_with_red_outline); let line = Line::new(Point::new(180, 160), Point::new(239, 239)) .into_styled(PrimitiveStyle::with_stroke(RgbColor::WHITE, 10)); // draw the shapes on a black background display.clear(Rgb565::BLACK).unwrap(); circle1.draw(&mut display).unwrap(); circle2.draw(&mut display).unwrap(); triangle.draw(&mut display).unwrap(); line.draw(&mut display).unwrap(); hprintln!("Rendering done").unwrap(); loop { continue; // keep optimizer from removing in --release } }
main
cliapp.rs
// This module implements the definition of the command line app. // // It must not have any other imports, because the build.rs file also uses it to // automatically generate the completion scripts. use clap::{App, AppSettings, Arg, ArgGroup, Shell}; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); pub const ABOUT: &str = "The official Sentry Relay."; pub fn make_app() -> App<'static, 'static> { App::new("relay") .global_setting(AppSettings::UnifiedHelpMessage) .global_setting(AppSettings::DisableHelpSubcommand) .setting(AppSettings::SubcommandRequiredElseHelp) .setting(AppSettings::GlobalVersion) .max_term_width(79) .help_message("Print this help message.") .version(VERSION) .version_message("Print version information.") .about(ABOUT) .arg( Arg::with_name("config") .value_name("CONFIG") .long("config") .short("c") .global(true) .help("The path to the config folder."), ) .subcommand( App::new("run") .about("Run the relay") .after_help( "This runs the relay in the foreground until it's shut down. It will bind \ to the port and network interface configured in the config file.", ) .arg( Arg::with_name("secret_key") .long("secret-key") .short("s") .value_name("KEY") .requires("public_key") .help("The secret key to set"), ) .arg( Arg::with_name("public_key") .long("public-key") .short("p") .value_name("KEY") .requires("secret_key") .help("The public key to set"), ) .arg( Arg::with_name("id") .long("id") .short("i") .value_name("RELAY_ID") .help("The relay ID to set"), ) .arg( Arg::with_name("upstream") .value_name("UPSTREAM_URL") .takes_value(true) .short("u") .long("upstream") .help("The upstream server URL."), ) .arg( Arg::with_name("host") .value_name("HOST") .takes_value(true) .short("H") .long("host") .help("The host dns name."), ) .arg( Arg::with_name("port") .value_name("PORT") .takes_value(true) .short("P") .long("port") .help("The server port."), ) .arg( Arg::with_name("processing") .long("processing") .help("Enable processing."), ) .arg( Arg::with_name("no_processing") .long("no-processing") .help("Disable processing."), ) .group( ArgGroup::with_name("processing_group") .args(&["processing", "no_processing"]) .multiple(false), ) .arg( Arg::with_name("kafka_broker_url") .value_name("KAFKA_BROKER_URL") .takes_value(true) .long("kafka-broker-url") .help("Kafka broker URL."), ) .arg( Arg::with_name("redis_url") .value_name("REDIS_URL") .takes_value(true) .long("redis-url") .help("Redis server URL."), ) .arg( Arg::with_name("source_id") .value_name("SOURCE_ID") .takes_value(true) .long("source-id") .env("RELAY_SOURCE_ID") .help("Names the current relay in the outcome source."), ), ) .subcommand( App::new("credentials") .setting(AppSettings::SubcommandRequiredElseHelp) .about("Manage the relay credentials") .after_help( "This command can be used to manage the stored credentials of \ the relay. These credentials are used to authenticate with the \ upstream sentry. A sentry organization trusts a certain public \ key and each relay is identified with a unique relay ID.\n\ \n\ Multiple relays can share the same public/secret key pair for as \ long as they use different relay IDs. Once a relay (as identified \ by the ID) has signed in with a certain key it cannot be changed \ any more.", ) .subcommand( App::new("generate") .about("Generate new credentials") .after_help( "This generates new credentials for the relay and stores \ them.
In case the relay already has credentials stored \ this command will error unless the '--overwrite' option \ has been passed.", ) .arg( Arg::with_name("overwrite") .long("overwrite") .help("Overwrite already existing credentials instead of failing"), ) .arg( Arg::with_name("stdout") .long("stdout") .help("Write credentials to stdout instead of credentials.json"), ), ) .subcommand( App::new("remove") .about("Remove credentials") .after_help( "This command removes already stored credentials from the \ relay.", ) .arg( Arg::with_name("yes") .long("yes") .help("Do not prompt for confirmation"), ), ) .subcommand( App::new("show") .about("Show currently stored credentials.") .after_help("This prints out the agent ID and public key."), ) .subcommand( App::new("set")
"Credentials can be stored by providing them on the command \ line. If just an agent id (or secret/public key pair) is \ provided, that part of the credentials is overwritten. If \ no credentials are stored yet at all and no parameters are \ supplied, the command will prompt for the appropriate values.", ) .arg( Arg::with_name("secret_key") .long("secret-key") .short("s") .value_name("KEY") .requires("public_key") .help("The secret key to set"), ) .arg( Arg::with_name("public_key") .long("public-key") .short("p") .value_name("KEY") .requires("secret_key") .help("The public key to set"), ) .arg( Arg::with_name("id") .long("id") .short("i") .value_name("RELAY_ID") .help("The relay ID to set"), ), ), ) .subcommand( App::new("config") .about("Manage the relay config") .after_help( "This command provides basic config management. It can be \ used primarily to initialize a new relay config and to \ print out the current config.", ) .setting(AppSettings::SubcommandRequiredElseHelp) .subcommand( App::new("init") .about("Initialize a new relay config") .after_help( "For new relay installations this will guide through \ the initial config process and create the necessary \ files. It will create an initial config as well as a \ set of credentials.", ), ) .subcommand( App::new("show") .about("Show the entire config for debugging purposes") .after_help( "This dumps out the entire config including the values \ which are not in the config file but filled in from \ defaults. The default output format is YAML but \ a debug format can also be specified, which is useful \ to understand how the relay interprets the individual \ values.", ) .arg( Arg::with_name("format") .short("f") .long("format") .possible_values(&["debug", "yaml"]) .default_value("yaml") .help("The output format"), ), ), ) .subcommand( App::new("generate-completions") .about("Generate shell completion file") .after_help( "This generates a completions file for the shell of choice. \ The default selection will be an educated guess for the currently \ running shell.", ) .arg( Arg::with_name("format") .short("f") .long("format") .value_name("FORMAT") .possible_values(&Shell::variants()[..]) .help( "Explicitly pick the shell to generate a completion file \ for. The default is autodetection", ), ), ) }
.about("Set new credentials") .after_help(
partner.go
package managementpartner // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/tracing" "net/http" ) // PartnerClient is the client for the ACE Provisioning ManagementPartner API. type PartnerClient struct { BaseClient } // NewPartnerClient creates an instance of the PartnerClient client. func NewPartnerClient() PartnerClient { return NewPartnerClientWithBaseURI(DefaultBaseURI) } // NewPartnerClientWithBaseURI creates an instance of the PartnerClient client using a custom endpoint. Use this when // interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewPartnerClientWithBaseURI(baseURI string) PartnerClient { return PartnerClient{NewWithBaseURI(baseURI)} } // Create create a management partner for the objectId and tenantId. // Parameters: // partnerID - id of the Partner func (client PartnerClient) Create(ctx context.Context, partnerID string) (result PartnerResponse, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/PartnerClient.Create") defer func() {
sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.CreatePreparer(ctx, partnerID) if err != nil { err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Create", nil, "Failure preparing request") return } resp, err := client.CreateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Create", resp, "Failure sending request") return } result, err = client.CreateResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Create", resp, "Failure responding to request") return } return } // CreatePreparer prepares the Create request. func (client PartnerClient) CreatePreparer(ctx context.Context, partnerID string) (*http.Request, error) { pathParameters := map[string]interface{}{ "partnerId": autorest.Encode("path", partnerID), } const APIVersion = "2018-02-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/providers/Microsoft.ManagementPartner/partners/{partnerId}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client PartnerClient) CreateSender(req *http.Request) (*http.Response, error) { return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateResponder handles the response to the Create request. The method always // closes the http.Response Body. func (client PartnerClient) CreateResponder(resp *http.Response) (result PartnerResponse, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Delete delete the management partner for the objectId and tenantId. // Parameters: // partnerID - id of the Partner func (client PartnerClient) Delete(ctx context.Context, partnerID string) (result autorest.Response, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/PartnerClient.Delete") defer func() { sc := -1 if result.Response != nil { sc = result.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.DeletePreparer(ctx, partnerID) if err != nil { err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Delete", nil, "Failure preparing request") return } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Delete", resp, "Failure sending request") return } result, err = client.DeleteResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Delete", resp, "Failure responding to request") return } return } // DeletePreparer prepares the Delete request. 
func (client PartnerClient) DeletePreparer(ctx context.Context, partnerID string) (*http.Request, error) { pathParameters := map[string]interface{}{ "partnerId": autorest.Encode("path", partnerID), } const APIVersion = "2018-02-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/providers/Microsoft.ManagementPartner/partners/{partnerId}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client PartnerClient) DeleteSender(req *http.Request) (*http.Response, error) { return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. func (client PartnerClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) result.Response = resp return } // Get get the management partner using the partnerId, objectId and tenantId. // Parameters: // partnerID - id of the Partner func (client PartnerClient) Get(ctx context.Context, partnerID string) (result PartnerResponse, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/PartnerClient.Get") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.GetPreparer(ctx, partnerID) if err != nil { err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Get", nil, "Failure preparing request") return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Get", resp, "Failure sending request") return } result, err = client.GetResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Get", resp, "Failure responding to request") return } return } // GetPreparer prepares the Get request. func (client PartnerClient) GetPreparer(ctx context.Context, partnerID string) (*http.Request, error) { pathParameters := map[string]interface{}{ "partnerId": autorest.Encode("path", partnerID), } const APIVersion = "2018-02-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/providers/Microsoft.ManagementPartner/partners/{partnerId}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client PartnerClient) GetSender(req *http.Request) (*http.Response, error) { return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetResponder handles the response to the Get request. The method always // closes the http.Response Body. 
func (client PartnerClient) GetResponder(resp *http.Response) (result PartnerResponse, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Update update the management partner for the objectId and tenantId. // Parameters: // partnerID - id of the Partner func (client PartnerClient) Update(ctx context.Context, partnerID string) (result PartnerResponse, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/PartnerClient.Update") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.UpdatePreparer(ctx, partnerID) if err != nil { err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Update", nil, "Failure preparing request") return } resp, err := client.UpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Update", resp, "Failure sending request") return } result, err = client.UpdateResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "managementpartner.PartnerClient", "Update", resp, "Failure responding to request") return } return } // UpdatePreparer prepares the Update request. func (client PartnerClient) UpdatePreparer(ctx context.Context, partnerID string) (*http.Request, error) { pathParameters := map[string]interface{}{ "partnerId": autorest.Encode("path", partnerID), } const APIVersion = "2018-02-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/providers/Microsoft.ManagementPartner/partners/{partnerId}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client PartnerClient) UpdateSender(req *http.Request) (*http.Response, error) { return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateResponder handles the response to the Update request. The method always // closes the http.Response Body. func (client PartnerClient) UpdateResponder(resp *http.Response) (result PartnerResponse, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return }
main.py
# Copyright (c) Facebook, Inc. and its affiliates. import argparse import os import sys import pickle import numpy as np import torch from torch.multiprocessing import set_start_method from torch.utils.data import DataLoader, DistributedSampler # 3DETR codebase specific imports from datasets import build_dataset from engine import evaluate, train_one_epoch from models import build_model from optimizer import build_optimizer from criterion import build_criterion from utils.dist import init_distributed, is_distributed, is_primary, get_rank, barrier from utils.misc import my_worker_init_fn from utils.io import save_checkpoint, resume_if_possible from utils.logger import Logger def make_args_parser(): parser = argparse.ArgumentParser("3D Detection Using Transformers", add_help=False) ##### Optimizer ##### parser.add_argument("--base_lr", default=5e-4, type=float) parser.add_argument("--warm_lr", default=1e-6, type=float) parser.add_argument("--warm_lr_epochs", default=9, type=int) parser.add_argument("--final_lr", default=1e-6, type=float) parser.add_argument("--lr_scheduler", default="cosine", type=str) parser.add_argument("--weight_decay", default=0.1, type=float) parser.add_argument("--filter_biases_wd", default=False, action="store_true") parser.add_argument( "--clip_gradient", default=0.1, type=float, help="Max L2 norm of the gradient" ) ##### Model ##### parser.add_argument( "--model_name", default="3detr", type=str, help="Name of the model", choices=["3detr"], ) ### Encoder parser.add_argument( "--enc_type", default="vanilla", choices=["masked", "maskedv2", "vanilla"] ) # Below options are only valid for vanilla encoder parser.add_argument("--enc_nlayers", default=3, type=int) parser.add_argument("--enc_dim", default=256, type=int) parser.add_argument("--enc_ffn_dim", default=128, type=int) parser.add_argument("--enc_dropout", default=0.1, type=float) parser.add_argument("--enc_nhead", default=4, type=int) parser.add_argument("--enc_pos_embed", default=None, type=str) parser.add_argument("--enc_activation", default="relu", type=str) ### Decoder parser.add_argument("--dec_nlayers", default=8, type=int) parser.add_argument("--dec_dim", default=256, type=int) parser.add_argument("--dec_ffn_dim", default=256, type=int) parser.add_argument("--dec_dropout", default=0.1, type=float) parser.add_argument("--dec_nhead", default=4, type=int) ### MLP heads for predicting bounding boxes parser.add_argument("--mlp_dropout", default=0.3, type=float) parser.add_argument( "--nsemcls", default=-1, type=int, help="Number of semantic object classes. 
Can be inferred from dataset", ) ### Other model params parser.add_argument("--preenc_npoints", default=2048, type=int) parser.add_argument( "--pos_embed", default="fourier", type=str, choices=["fourier", "sine"] ) parser.add_argument("--nqueries", default=256, type=int) parser.add_argument("--use_color", default=False, action="store_true") ##### Set Loss ##### ### Matcher parser.add_argument("--matcher_giou_cost", default=2, type=float) parser.add_argument("--matcher_cls_cost", default=1, type=float) parser.add_argument("--matcher_center_cost", default=0, type=float) parser.add_argument("--matcher_objectness_cost", default=0, type=float) ### Loss Weights parser.add_argument("--loss_giou_weight", default=0, type=float) parser.add_argument("--loss_sem_cls_weight", default=1, type=float) parser.add_argument( "--loss_no_object_weight", default=0.2, type=float ) # "no object" or "background" class for detection parser.add_argument("--loss_angle_cls_weight", default=0.1, type=float) parser.add_argument("--loss_angle_reg_weight", default=0.5, type=float) parser.add_argument("--loss_center_weight", default=5.0, type=float) parser.add_argument("--loss_size_weight", default=1.0, type=float) ##### Dataset ##### parser.add_argument( "--dataset_name", required=True, type=str, choices=["scannet", "sunrgbd"] ) parser.add_argument( "--dataset_root_dir", type=str, default=None, help="Root directory containing the dataset files. \ If None, default values from scannet.py/sunrgbd.py are used", ) # parser.add_argument( # "--meta_data_dir", # type=str, # default=None, # help="Root directory containing the metadata files. \ # If None, default values from scannet.py/sunrgbd.py are used", # ) parser.add_argument("--dataset_num_workers", default=4, type=int) parser.add_argument("--batchsize_per_gpu", default=8, type=int) ##### Training ##### parser.add_argument("--start_epoch", default=-1, type=int) parser.add_argument("--max_epoch", default=720, type=int) parser.add_argument("--eval_every_epoch", default=10, type=int) parser.add_argument("--seed", default=0, type=int) ##### Testing ##### parser.add_argument("--test_only", default=False, action="store_true") parser.add_argument("--test_ckpt", default=None, type=str) ##### I/O ##### parser.add_argument("--checkpoint_dir", default=None, type=str) parser.add_argument("--log_every", default=10, type=int) parser.add_argument("--log_metrics_every", default=20, type=int) parser.add_argument("--save_separate_checkpoint_every_epoch", default=100, type=int) ##### Distributed Training ##### parser.add_argument("--ngpus", default=1, type=int) parser.add_argument("--dist_url", default="tcp://localhost:12345", type=str) return parser def
( args, model, model_no_ddp, optimizer, criterion, dataset_config, dataloaders, best_val_metrics, ): """ Main training loop. This trains the model for `args.max_epoch` epochs and tests the model after every `args.eval_every_epoch`. We always evaluate the final checkpoint and report both the final AP and best AP on the val set. """ num_iters_per_epoch = len(dataloaders["train"]) num_iters_per_eval_epoch = len(dataloaders["test"]) print(f"Model is {model}") print(f"Training started at epoch {args.start_epoch} until {args.max_epoch}.") print(f"One training epoch = {num_iters_per_epoch} iters.") print(f"One eval epoch = {num_iters_per_eval_epoch} iters.") final_eval = os.path.join(args.checkpoint_dir, "final_eval.txt") final_eval_pkl = os.path.join(args.checkpoint_dir, "final_eval.pkl") if os.path.isfile(final_eval): print(f"Found final eval file {final_eval}. Skipping training.") return logger = Logger(args.checkpoint_dir) for epoch in range(args.start_epoch, args.max_epoch): if is_distributed(): dataloaders["train_sampler"].set_epoch(epoch) aps = train_one_epoch( args, epoch, model, optimizer, criterion, dataset_config, dataloaders["train"], logger, ) # latest checkpoint is always stored in checkpoint.pth save_checkpoint( args.checkpoint_dir, model_no_ddp, optimizer, epoch, args, best_val_metrics, filename="checkpoint.pth", ) metrics = aps.compute_metrics() metric_str = aps.metrics_to_str(metrics, per_class=False) metrics_dict = aps.metrics_to_dict(metrics) curr_iter = epoch * len(dataloaders["train"]) if is_primary(): print("==" * 10) print(f"Epoch [{epoch}/{args.max_epoch}]; Metrics {metric_str}") print("==" * 10) logger.log_scalars(metrics_dict, curr_iter, prefix="Train/") if ( epoch > 0 and args.save_separate_checkpoint_every_epoch > 0 and epoch % args.save_separate_checkpoint_every_epoch == 0 ): # separate checkpoints are stored as checkpoint_{epoch}.pth save_checkpoint( args.checkpoint_dir, model_no_ddp, optimizer, epoch, args, best_val_metrics, ) if epoch % args.eval_every_epoch == 0 or epoch == (args.max_epoch - 1): ap_calculator = evaluate( args, epoch, model, criterion, dataset_config, dataloaders["test"], logger, curr_iter, ) metrics = ap_calculator.compute_metrics() ap25 = metrics[0.25]["mAP"] metric_str = ap_calculator.metrics_to_str(metrics, per_class=True) metrics_dict = ap_calculator.metrics_to_dict(metrics) if is_primary(): print("==" * 10) print(f"Evaluate Epoch [{epoch}/{args.max_epoch}]; Metrics {metric_str}") print("==" * 10) logger.log_scalars(metrics_dict, curr_iter, prefix="Test/") if is_primary() and ( len(best_val_metrics) == 0 or best_val_metrics[0.25]["mAP"] < ap25 ): best_val_metrics = metrics filename = "checkpoint_best.pth" save_checkpoint( args.checkpoint_dir, model_no_ddp, optimizer, epoch, args, best_val_metrics, filename=filename, ) print( f"Epoch [{epoch}/{args.max_epoch}] saved current best val checkpoint at {filename}; ap25 {ap25}" ) # always evaluate last checkpoint epoch = args.max_epoch - 1 curr_iter = epoch * len(dataloaders["train"]) ap_calculator = evaluate( args, epoch, model, criterion, dataset_config, dataloaders["test"], logger, curr_iter, ) metrics = ap_calculator.compute_metrics() metric_str = ap_calculator.metrics_to_str(metrics) if is_primary(): print("==" * 10) print(f"Evaluate Final [{epoch}/{args.max_epoch}]; Metrics {metric_str}") print("==" * 10) with open(final_eval, "w") as fh: fh.write("Training Finished.\n") fh.write("==" * 10) fh.write("Final Eval Numbers.\n") fh.write(metric_str) fh.write("\n") fh.write("==" * 10) fh.write("Best 
Eval Numbers.\n") fh.write(ap_calculator.metrics_to_str(best_val_metrics)) fh.write("\n") with open(final_eval_pkl, "wb") as fh: pickle.dump(metrics, fh) def test_model(args, model, model_no_ddp, criterion, dataset_config, dataloaders): if args.test_ckpt is None or not os.path.isfile(args.test_ckpt): print(f"Please specify a test checkpoint using --test_ckpt. Found invalid value {args.test_ckpt}") sys.exit(1) sd = torch.load(args.test_ckpt, map_location=torch.device("cpu")) model_no_ddp.load_state_dict(sd["model"]) logger = Logger() criterion = None # do not compute loss for speed-up; Comment out to see test loss epoch = -1 curr_iter = 0 ap_calculator = evaluate( args, epoch, model, criterion, dataset_config, dataloaders["test"], logger, curr_iter, ) metrics = ap_calculator.compute_metrics() metric_str = ap_calculator.metrics_to_str(metrics) if is_primary(): print("==" * 10) print(f"Test model; Metrics {metric_str}") print("==" * 10) def main(local_rank, args): if args.ngpus > 1: print( "Initializing Distributed Training. This is in BETA mode and hasn't been tested thoroughly. Use at your own risk :)" ) print("To get the maximum speed-up consider reducing evaluations on val set by setting --eval_every_epoch to greater than 50") init_distributed( local_rank, global_rank=local_rank, world_size=args.ngpus, dist_url=args.dist_url, dist_backend="nccl", ) print(f"Called with args: {args}") torch.cuda.set_device(local_rank) np.random.seed(args.seed + get_rank()) torch.manual_seed(args.seed + get_rank()) if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed + get_rank()) datasets, dataset_config = build_dataset(args) model, _ = build_model(args, dataset_config) model = model.cuda(local_rank) model_no_ddp = model if is_distributed(): model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[local_rank] ) criterion = build_criterion(args, dataset_config) criterion = criterion.cuda(local_rank) dataloaders = {} if args.test_only: dataset_splits = ["test"] else: dataset_splits = ["train", "test"] for split in dataset_splits: if split == "train": shuffle = True else: shuffle = False if is_distributed(): sampler = DistributedSampler(datasets[split], shuffle=shuffle) elif shuffle: sampler = torch.utils.data.RandomSampler(datasets[split]) else: sampler = torch.utils.data.SequentialSampler(datasets[split]) dataloaders[split] = DataLoader( datasets[split], sampler=sampler, batch_size=args.batchsize_per_gpu, num_workers=args.dataset_num_workers, worker_init_fn=my_worker_init_fn, ) dataloaders[split + "_sampler"] = sampler if args.test_only: criterion = None # faster evaluation test_model(args, model, model_no_ddp, criterion, dataset_config, dataloaders) else: assert ( args.checkpoint_dir is not None ), "Please specify a checkpoint dir using --checkpoint_dir" if is_primary() and not os.path.isdir(args.checkpoint_dir): os.makedirs(args.checkpoint_dir, exist_ok=True) optimizer = build_optimizer(args, model_no_ddp) loaded_epoch, best_val_metrics = resume_if_possible( args.checkpoint_dir, model_no_ddp, optimizer ) args.start_epoch = loaded_epoch + 1 do_train( args, model, model_no_ddp, optimizer, criterion, dataset_config, dataloaders, best_val_metrics, ) def launch_distributed(args): world_size = args.ngpus if world_size == 1: main(local_rank=0, args=args) else: torch.multiprocessing.spawn(main, nprocs=world_size, args=(args,)) if __name__ == "__main__": parser = make_args_parser() args = parser.parse_args() try:
set_start_method("spawn") except RuntimeError: pass launch_distributed(args)
do_train
multifragment_update.rs
use std::io; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use crate::{rdp::CapabilitySetsError, PduParsing}; #[derive(Debug, PartialEq, Clone)] pub struct MultifragmentUpdate { pub max_request_size: u32, } impl PduParsing for MultifragmentUpdate { type Error = CapabilitySetsError; fn from_buffer(mut buffer: impl io::Read) -> Result<Self, Self::Error> { let max_request_size = buffer.read_u32::<LittleEndian>()?; Ok(Self { max_request_size }) } fn to_buffer(&self, mut buffer: impl io::Write) -> Result<(), Self::Error> { buffer.write_u32::<LittleEndian>(self.max_request_size)?; Ok(()) } fn buffer_length(&self) -> usize { 4 } }
mod test { use super::*; const MULTIFRAGMENT_UPDATE_PDU_BUFFER: [u8; 4] = [0xf4, 0xf3, 0xf2, 0xf1]; const MULTIFRAGMENT_UPDATE_PDU: MultifragmentUpdate = MultifragmentUpdate { max_request_size: 0xf1f2_f3f4, }; #[test] fn from_buffer_correctly_parses_multifragment_update() { assert_eq!( MULTIFRAGMENT_UPDATE_PDU, MultifragmentUpdate::from_buffer(MULTIFRAGMENT_UPDATE_PDU_BUFFER.as_ref()).unwrap() ); } #[test] fn to_buffer_correctly_serializes_multifragment_update() { let expected = MULTIFRAGMENT_UPDATE_PDU_BUFFER.as_ref(); let mut buffer = Vec::with_capacity(expected.len()); MULTIFRAGMENT_UPDATE_PDU.to_buffer(&mut buffer).unwrap(); assert_eq!(expected, buffer.as_slice()); } #[test] fn buffer_length_is_correct_for_multifragment_update() { assert_eq!( MULTIFRAGMENT_UPDATE_PDU_BUFFER.len(), MULTIFRAGMENT_UPDATE_PDU.buffer_length() ); } }
#[cfg(test)]
crypto.py
#crypto.py from urllib.request import urlopen as req from bs4 import BeautifulSoup as soup def rangeprice(name='bitcoin',start='20200101',end='20200131'): url = 'https://coinmarketcap.com/currencies/{}/historical-data/?start={}&end={}'.format(name,start,end) webopen = req(url) page_html = webopen.read() webopen.close() data = soup(page_html,'html.parser') table = data.findAll('tr') list_days = [] list_dict = {} for row in table[3:]: rw = row.findAll('div') days = [] for i,r in enumerate(rw): if i > 0 and i < 5: days.append(float(r.text.replace(',',''))) elif i > 4: days.append(int(r.text.replace(',',''))) else: days.append(r.text.replace(',','')) list_days.append(days) list_dict[days[0]] = {'date':days[0],'open':days[1],'high':days[2],'low':days[3],'close':days[4],'volume':days[5],'marketcap':days[6]} return (list_days,list_dict) def dayprice(name='bitcoin',day='20200131'):
if __name__ == '__main__': L,D = rangeprice('xrp',start='20200105',end='20200131') print(L) print(D) L,D = dayprice('bitcoin','20200131') print(L) print(D)
try: url = 'https://coinmarketcap.com/currencies/{}/historical-data/?start={}&end={}'.format(name,day,day) webopen = req(url) page_html = webopen.read() webopen.close() data = soup(page_html,'html.parser') table = data.findAll('tr') list_days = [] list_dict = {} for row in table[3:]: rw = row.findAll('div') days = [] for i,r in enumerate(rw): if i > 0 and i < 5: days.append(float(r.text.replace(',',''))) elif i > 4: days.append(int(r.text.replace(',',''))) else: days.append(r.text.replace(',','')) list_days.append(days) list_dict[days[0]] = {'date':days[0],'open':days[1],'high':days[2],'low':days[3],'close':days[4],'volume':days[5],'marketcap':days[6]} list_dict = list_dict[list_days[0][0]] list_days = list_days[0] except Exception: list_days = ['Not Found / Connection Loss'] list_dict = {'error':'Not Found / Connection Loss'} return (list_days,list_dict)
blink.py
#!/usr/bin/python """ Turns an LED on for one second, then off for one second, repeatedly. Most Arduinos have an on-board LED you can control. On the Uno and Leonardo, it is attached to digital pin 13. If you're unsure what pin the on-board LED is connected to on your Arduino model, check the documentation at http://www.arduino.cc """ from pymata_aio.pymata3 import PyMata3 from pymata_aio.constants import Constants # Arduino LED is on pin 13 BOARD_LED = 13 # If you are having problems connecting, you may # wish to add some time to the arduino_wait parameter. # replace: # board = PyMata3() # with: # board = PyMata3(arduino_wait=5) # adjust the arduino_wait value to meet the needs # of your computer # instantiate PyMata3 board = PyMata3() def setup():
def loop(): """ Toggle the LED by alternating the values written to the LED pin. Wait 1 second between writes. Also note the use of board.sleep and not time.sleep. :return: """ print("LED On") board.digital_write(BOARD_LED, 1) board.sleep(1.0) print("LED Off") board.digital_write(BOARD_LED, 0) board.sleep(1.0) if __name__ == "__main__": setup() while True: loop()
""" Set the Arduino BOARD_LED pin as an output :return: """ board.set_pin_mode(BOARD_LED, Constants.OUTPUT)
indeed_cursor.py
class
(JobCursor): def __init__(self, title: str, location: str, radius: int = 25): base_url = "https://www.indeed.com/jobs?" self._title = title self._location = location title_esc = ul.quote(self._title, safe='') location_esc = ul.quote(self._location, safe='') req_url = base_url + "q={}&l={}".format(title_esc, location_esc) # TODO
IndeedCursor
lib.rs
//! `awc` is a HTTP and WebSocket client library built on the Actix ecosystem. //! //! ## Making a GET request //! //! ```no_run //! # #[actix_rt::main] //! # async fn main() -> Result<(), awc::error::SendRequestError> { //! let mut client = awc::Client::default(); //! let response = client.get("http://www.rust-lang.org") // <- Create request builder //! .insert_header(("User-Agent", "Actix-web")) //! .send() // <- Send http request //! .await?; //! //! println!("Response: {:?}", response); //! # Ok(()) //! # } //! ``` //! //! ## Making POST requests //! //! ### Raw body contents //! //! ```no_run //! # #[actix_rt::main] //! # async fn main() -> Result<(), awc::error::SendRequestError> { //! let mut client = awc::Client::default(); //! let response = client.post("http://httpbin.org/post") //! .send_body("Raw body contents") //! .await?; //! # Ok(()) //! # } //! ``` //! //! ### Forms //! //! ```no_run //! # #[actix_rt::main] //! # async fn main() -> Result<(), awc::error::SendRequestError> { //! let params = [("foo", "bar"), ("baz", "quux")]; //! //! let mut client = awc::Client::default(); //! let response = client.post("http://httpbin.org/post") //! .send_form(&params) //! .await?; //! # Ok(()) //! # } //! ``` //! //! ### JSON //! //! ```no_run //! # #[actix_rt::main] //! # async fn main() -> Result<(), awc::error::SendRequestError> { //! let request = serde_json::json!({ //! "lang": "rust", //! "body": "json" //! }); //! //! let mut client = awc::Client::default(); //! let response = client.post("http://httpbin.org/post") //! .send_json(&request) //! .await?; //! # Ok(()) //! # } //! ``` //! //! ## WebSocket support //! //! ```no_run //! # #[actix_rt::main] //! # async fn main() -> Result<(), Box<dyn std::error::Error>> { //! use futures_util::{sink::SinkExt, stream::StreamExt}; //! let (_resp, mut connection) = awc::Client::new() //! .ws("ws://echo.websocket.org") //! .connect() //! .await?; //! //! connection //! .send(awc::ws::Message::Text("Echo".into())) //! .await?; //! let response = connection.next().await.unwrap()?; //! # assert_eq!(response, awc::ws::Frame::Text("Echo".as_bytes().into())); //! # Ok(()) //! # } //! ``` #![deny(rust_2018_idioms)] #![allow( clippy::type_complexity, clippy::borrow_interior_mutable_const, clippy::needless_doctest_main )] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] use std::cell::RefCell; use std::convert::TryFrom; use std::rc::Rc; use std::time::Duration; pub use actix_http::{client::Connector, cookie, http}; use actix_http::http::{Error as HttpError, HeaderMap, Method, Uri}; use actix_http::RequestHead; mod builder; mod connect; pub mod error; mod frozen; mod request; mod response; mod sender; pub mod test; pub mod ws; pub use self::builder::ClientBuilder; pub use self::connect::BoxedSocket; pub use self::frozen::{FrozenClientRequest, FrozenSendBuilder}; pub use self::request::ClientRequest; pub use self::response::{ClientResponse, JsonBody, MessageBody}; pub use self::sender::SendClientRequest; use self::connect::{Connect, ConnectorWrapper}; /// An asynchronous HTTP and WebSocket client. 
/// /// ## Examples /// /// ```rust /// use awc::Client; /// /// #[actix_rt::main] /// async fn main() { /// let mut client = Client::default(); /// /// let res = client.get("http://www.rust-lang.org") // <- Create request builder /// .insert_header(("User-Agent", "Actix-web")) /// .send() // <- Send http request /// .await; // <- send request and wait for response /// /// println!("Response: {:?}", res); /// } /// ``` #[derive(Clone)] pub struct Client(Rc<ClientConfig>); pub(crate) struct ClientConfig { pub(crate) connector: RefCell<Box<dyn Connect>>, pub(crate) headers: HeaderMap, pub(crate) timeout: Option<Duration>, } impl Default for Client { fn default() -> Self { Client(Rc::new(ClientConfig { connector: RefCell::new(Box::new(ConnectorWrapper( Connector::new().finish(), ))), headers: HeaderMap::new(), timeout: Some(Duration::from_secs(5)), })) } } impl Client { /// Create new client instance with default settings. pub fn new() -> Client { Client::default() } /// Create `Client` builder. /// This function is equivalent of `ClientBuilder::new()`. pub fn builder() -> ClientBuilder { ClientBuilder::new() } /// Construct HTTP request. pub fn request<U>(&self, method: Method, url: U) -> ClientRequest where Uri: TryFrom<U>, <Uri as TryFrom<U>>::Error: Into<HttpError>, { let mut req = ClientRequest::new(method, url, self.0.clone()); for header in self.0.headers.iter() { req = req.insert_header_if_none(header); } req } /// Create `ClientRequest` from `RequestHead` /// /// It is useful for proxy requests. This implementation /// copies all headers and the method. pub fn request_from<U>(&self, url: U, head: &RequestHead) -> ClientRequest where Uri: TryFrom<U>, <Uri as TryFrom<U>>::Error: Into<HttpError>, { let mut req = self.request(head.method.clone(), url); for header in head.headers.iter() { req = req.insert_header_if_none(header); } req } /// Construct HTTP *GET* request. pub fn get<U>(&self, url: U) -> ClientRequest where Uri: TryFrom<U>, <Uri as TryFrom<U>>::Error: Into<HttpError>, { self.request(Method::GET, url) } /// Construct HTTP *HEAD* request. pub fn head<U>(&self, url: U) -> ClientRequest where Uri: TryFrom<U>, <Uri as TryFrom<U>>::Error: Into<HttpError>, { self.request(Method::HEAD, url) } /// Construct HTTP *PUT* request. pub fn put<U>(&self, url: U) -> ClientRequest where Uri: TryFrom<U>, <Uri as TryFrom<U>>::Error: Into<HttpError>, { self.request(Method::PUT, url) } /// Construct HTTP *POST* request. pub fn post<U>(&self, url: U) -> ClientRequest where Uri: TryFrom<U>, <Uri as TryFrom<U>>::Error: Into<HttpError>, { self.request(Method::POST, url) } /// Construct HTTP *PATCH* request. pub fn patch<U>(&self, url: U) -> ClientRequest where Uri: TryFrom<U>, <Uri as TryFrom<U>>::Error: Into<HttpError>, { self.request(Method::PATCH, url) } /// Construct HTTP *DELETE* request. pub fn delete<U>(&self, url: U) -> ClientRequest where Uri: TryFrom<U>, <Uri as TryFrom<U>>::Error: Into<HttpError>, { self.request(Method::DELETE, url) } /// Construct HTTP *OPTIONS* request. pub fn
<U>(&self, url: U) -> ClientRequest where Uri: TryFrom<U>, <Uri as TryFrom<U>>::Error: Into<HttpError>, { self.request(Method::OPTIONS, url) } /// Initialize a WebSocket connection. /// Returns a WebSocket connection builder. pub fn ws<U>(&self, url: U) -> ws::WebsocketsRequest where Uri: TryFrom<U>, <Uri as TryFrom<U>>::Error: Into<HttpError>, { let mut req = ws::WebsocketsRequest::new(url, self.0.clone()); for (key, value) in self.0.headers.iter() { req.head.headers.insert(key.clone(), value.clone()); } req } }
options
codegen.rs
// Copyright 2017 The Australian National University
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use ast::ir::*;
use ast::ptr::P;
use runtime::ValueLocation;

use compiler::machine_code::MachineCode;
use compiler::backend::{Reg, Mem};

pub trait CodeGenerator {
    fn start_code(&mut self, func_name: MuName, entry: MuName) -> ValueLocation;
    fn finish_code(&mut self, func_name: MuName)
        -> (Box<MachineCode + Sync + Send>, ValueLocation);

    // generate unnamed sequence of linear code (no branch)
    fn start_code_sequence(&mut self);
    fn finish_code_sequence(&mut self) -> Box<MachineCode + Sync + Send>;

    fn print_cur_code(&self);

    fn start_block(&mut self, block_name: MuName);
    fn block_exists(&self, block_name: MuName) -> bool;
    fn start_exception_block(&mut self, block_name: MuName) -> ValueLocation;
    fn end_block(&mut self, block_name: MuName);

    // add CFI info
    fn add_cfi_sections(&mut self, arg: &str);
    fn add_cfi_startproc(&mut self);
    fn add_cfi_endproc(&mut self);
    fn add_cfi_def_cfa(&mut self, reg: Reg, offset: i32);
    fn add_cfi_def_cfa_register(&mut self, reg: Reg);
    fn add_cfi_def_cfa_offset(&mut self, offset: i32);
    fn add_cfi_offset(&mut self, reg: Reg, offset: i32);

    //===========================================================================================

    // emit code to adjust frame
    fn emit_frame_grow(&mut self); // Emits a SUB

    // stack manipulation
    fn emit_push_pair(&mut self, src1: Reg, src2: Reg, stack: Reg); // Emits a STP
    fn emit_pop_pair(&mut self, dest1: Reg, dest2: Reg, stack: Reg); // Emits a LDP

    // For callee saved loads and stores (flags them so that only they are removed)
    fn emit_ldr_callee_saved(&mut self, dest: Reg, src: Mem);
    fn emit_str_callee_saved(&mut self, dest: Mem, src: Reg);

    //===========================================================================================

    /*
        Below are all ARMv8-A AArch64 instruction mnemonics (with all operand modes) except:
            PRFM, PRFUM, CRC32*
            All advanced SIMD instructions (except MOVI)

        NOTE: with loads and stores the mnemonic indicated may be given a suffix indicating
        the size and signedness of the access
        also b_cond's mnemonic is 'B.cond' (where cond is the value of the 'cond' parameter)
        all other instructions have the mnemonic being the first word of the function name
        after emit_ (subsequent words are used to disambiguate different overloads)
        NOTE unless otherwise indicated:
            An instruction that doesn't start with an F operates on GPRs, those that start
            with an F operate on FPRs.
            All instructions operate on 32-bit and 64-bit registers (but all register
            arguments must be the same size)
            Also all arguments that may take the SP can't take the ZR (and vice versa)
    */

    // loads
    // supports the full range of addressing modes
    fn emit_ldr(&mut self, dest: Reg /*GPR or FPR*/, src: Mem, signed: bool);
    fn emit_ldtr(&mut self, dest: Reg, src: Mem, signed: bool); // [base, #simm9]
    fn emit_ldur(&mut self, dest: Reg /*GPR or FPR*/, src: Mem, signed: bool); // [base, #simm9]

    fn emit_ldxr(&mut self, dest: Reg, src: Mem); // [base]
    fn emit_ldaxr(&mut self, dest: Reg, src: Mem); // [base]
    fn emit_ldar(&mut self, dest: Reg, src: Mem); // [base]

    // [base, #simm7], [base], #simm7, [base, #simm7]!
    fn emit_ldp(&mut self, dest1: Reg, dest2: Reg /*GPR or FPR*/, src: Mem);
    fn emit_ldxp(&mut self, dest1: Reg, dest2: Reg, src: Mem); // [base]
    fn emit_ldaxp(&mut self, dest1: Reg, dest2: Reg, src: Mem); // [base]
    fn emit_ldnp(
        &mut self,
        dest1: Reg, /*GPR or FPR*/
        dest2: Reg, /*GPR or FPR*/
        src: Mem
    ); // [base, #simm7]

    // Stores
    // supports the full range of addressing modes
    fn emit_str(&mut self, dest: Mem, src: Reg /*GPR or FPR*/);
    fn emit_sttr(&mut self, dest: Mem, src: Reg); // [base, #simm9]
    fn emit_stur(&mut self, dest: Mem, src: Reg /*GPR or FPR*/); // [base, #simm9]

    fn emit_stlr(&mut self, dest: Mem, src: Reg); // [base]
    fn emit_stxr(&mut self, dest: Mem, status: Reg, src: Reg); // [base]
    fn emit_stlxr(&mut self, dest: Mem, status: Reg, src: Reg); // [base]

    // [base, #simm7], [base], #simm7, [base, #simm7]!
    fn emit_stp(&mut self, dest: Mem, src1: Reg, src2: Reg);
    fn emit_stxp(&mut self, dest: Mem, status: Reg, src1: Reg, src2: Reg); // [base]
    fn emit_stlxp(&mut self, dest: Mem, status: Reg, src1: Reg, src2: Reg); // [base]
    fn emit_stnp(
        &mut self,
        dest: Mem,
        src1: Reg, /*GPR or FPR*/
        src2: Reg /*GPR or FPR*/
    ); // [base, #simm7]

    // Calls
    fn emit_bl(
        &mut self,
        callsite: Option<MuName>,
        func: MuName,
        pe: Option<MuName>,
        args: Vec<P<Value>>,
        ret: Vec<P<Value>>,
        is_native: bool
    ) -> Option<ValueLocation>;
    fn emit_blr(
        &mut self,
        callsite: Option<MuName>,
        func: Reg,
        pe: Option<MuName>,
        args: Vec<P<Value>>,
        ret: Vec<P<Value>>
    ) -> Option<ValueLocation>;

    // Branches
    fn emit_b(&mut self, dest_name: MuName);
    fn emit_b_cond(&mut self, cond: &str, dest_name: MuName);
    fn emit_br(&mut self, dest_address: Reg);
    fn emit_b_call(
        &mut self,
        callsite: Option<MuName>,
        func: MuName,
        pe: Option<MuName>,
        args: Vec<P<Value>>,
        ret: Vec<P<Value>>,
        is_native: bool,
        may_return: bool
    ) -> Option<ValueLocation>;
    fn emit_br_call(
        &mut self,
        callsite: Option<MuName>,
        func: Reg,
        pe: Option<MuName>,
        args: Vec<P<Value>>,
        ret: Vec<P<Value>>,
        may_return: bool
    ) -> Option<ValueLocation>;

    fn emit_ret(&mut self, src: Reg);
    fn emit_cbnz(&mut self, src: Reg, dest_name: MuName);
    fn emit_cbz(&mut self, src: Reg, dest_name: MuName);
    fn emit_tbnz(&mut self, src1: Reg, src2: u8, dest_name: MuName);
    fn emit_tbz(&mut self, src1: Reg, src2: u8, dest_name: MuName);

    // Read and write flags
    fn emit_msr(&mut self, dest: &str, src: Reg);
    fn emit_mrs(&mut self, dest: Reg, src: &str);

    // Address calculation
    fn emit_adr(&mut self, dest: Reg, src: Mem);
    fn emit_adrp(&mut self, dest: Reg, src: Mem);

    // Unary ops
    // The SP and ZR cannot both be used
    fn emit_mov(&mut self, dest: Reg /*GPR or SP or ZR*/, src: Reg /*GPR or SP or ZR*/);
    fn emit_mvn(&mut self, dest: Reg, src: Reg);
    fn emit_neg(&mut self, dest: Reg, src: Reg);
    fn emit_negs(&mut self, dest: Reg, src: Reg);
    fn emit_ngc(&mut self, dest: Reg, src: Reg);
    fn emit_ngcs(&mut self, dest: Reg, src: Reg);

    fn emit_sxtb(&mut self, dest: Reg /*32*/, src: Reg /*32*/);
    fn emit_sxth(&mut self, dest: Reg /*32*/, src: Reg /*32*/);
    fn emit_sxtw(&mut self, dest: Reg /*64*/, src: Reg /*32*/);
    fn emit_uxtb(&mut self, dest: Reg /*32*/, src: Reg /*32*/);
    fn emit_uxth(&mut self, dest: Reg /*32*/, src: Reg /*32*/);

    fn emit_cls(&mut self, dest: Reg, src: Reg);
    fn emit_clz(&mut self, dest: Reg, src: Reg);

    fn emit_rbit(&mut self, dest: Reg, src: Reg);
    fn emit_rev(&mut self, dest: Reg, src: Reg);
    fn emit_rev16(&mut self, dest: Reg, src: Reg);
    fn emit_rev32(&mut self, dest: Reg /*64*/, src: Reg);
    fn emit_rev64(&mut self, dest: Reg /*64*/, src: Reg); // alias of REV

    fn emit_fabs(&mut self, dest: Reg, src: Reg);
    fn emit_fcvt(&mut self, dest: Reg, src: Reg /*Must have different size*/);
    fn emit_fcvtas(&mut self, dest: Reg /*GPR, may have different size*/, src: Reg);
    fn emit_fcvtau(&mut self, dest: Reg /*GPR, may have different size*/, src: Reg);
    fn emit_fcvtms(&mut self, dest: Reg /*GPR, may have different size*/, src: Reg);
    fn emit_fcvtmu(&mut self, dest: Reg /*GPR, may have different size*/, src: Reg);
    fn emit_fcvtns(&mut self, dest: Reg /*GPR, may have different size*/, src: Reg);
    fn emit_fcvtnu(&mut self, dest: Reg /*GPR, may have different size*/, src: Reg);
    fn emit_fcvtps(&mut self, dest: Reg /*GPR, may have different size*/, src: Reg);
    fn emit_fcvtpu(&mut self, dest: Reg /*GPR, may have different size*/, src: Reg);
    fn emit_fcvtzs(&mut self, dest: Reg /*GPR, may have different size*/, src: Reg);
    fn emit_fcvtzu(&mut self, dest: Reg /*GPR, may have different size*/, src: Reg);

    // One register must be an FPR, the other may be a GPR or an FPR
    fn emit_fmov(&mut self, dest: Reg, src: Reg);

    fn emit_fneg(&mut self, dest: Reg, src: Reg);
    fn emit_frinta(&mut self, dest: Reg, src: Reg);
    fn emit_frinti(&mut self, dest: Reg, src: Reg);
    fn emit_frintm(&mut self, dest: Reg, src: Reg);
    fn emit_frintn(&mut self, dest: Reg, src: Reg);
    fn emit_frintp(&mut self, dest: Reg, src: Reg);
    fn emit_frintx(&mut self, dest: Reg, src: Reg);
    fn emit_frintz(&mut self, dest: Reg, src: Reg);
    fn emit_fsqrt(&mut self, dest: Reg, src: Reg);
    fn emit_scvtf(&mut self, dest: Reg /*FPR, may have different size*/, src: Reg);
    fn emit_ucvtf(&mut self, dest: Reg /*FPR, may have different size*/, src: Reg);

    // Unary operations with shift
    fn emit_mov_shift(&mut self, dest: Reg, src: Reg, shift: &str, amount: u8);
    fn emit_mvn_shift(&mut self, dest: Reg, src: Reg, shift: &str, amount: u8);
    fn emit_neg_shift(&mut self, dest: Reg, src: Reg, shift: &str, amount: u8);
    fn emit_negs_shift(&mut self, dest: Reg, src: Reg, shift: &str, amount: u8);

    // Unary operations with immediates
    fn emit_mov_imm(&mut self, dest: Reg, src: u64);
    fn emit_movz(&mut self, dest: Reg, src: u16, shift: u8);
    fn emit_movk(&mut self, dest: Reg, src: u16, shift: u8);
    fn emit_movn(&mut self, dest: Reg, src: u16, shift: u8);
fn emit_movi(&mut self, dest: Reg /*FPR*/, src: u64); fn emit_fmov_imm(&mut self, dest: Reg, src: f32); // Extended binary ops fn emit_add_ext( &mut self, dest: Reg, /*GPR or SP*/ src1: Reg, /*GPR or SP*/ src2: Reg, signed: bool, shift: u8 ); fn emit_adds_ext( &mut self, dest: Reg, src1: Reg, /*GPR or SP*/ src2: Reg, signed: bool, shift: u8 ); fn emit_sub_ext( &mut self, dest: Reg, /*GPR or SP*/ src1: Reg, /*GPR or SP*/ src2: Reg, signed: bool, shift: u8 ); fn emit_subs_ext( &mut self, dest: Reg, src1: Reg, /*GPR or SP*/ src2: Reg, signed: bool, shift: u8 ); // Multiplication fn emit_mul(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_mneg(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_smulh(&mut self, dest: Reg /*64*/, src1: Reg /*64*/, src2: Reg /*64*/); fn emit_umulh(&mut self, dest: Reg /*64*/, src1: Reg /*64*/, src2: Reg /*64*/); fn emit_smnegl(&mut self, dest: Reg /*64*/, src1: Reg /*32*/, src2: Reg /*32*/); fn emit_smull(&mut self, dest: Reg /*64*/, src1: Reg /*32*/, src2: Reg /*32*/); fn emit_umnegl(&mut self, dest: Reg /*64*/, src1: Reg /*32*/, src2: Reg /*32*/); fn emit_umull(&mut self, dest: Reg /*64*/, src1: Reg /*32*/, src2: Reg /*32*/); // Other binaries fn emit_adc(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_adcs(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_add(&mut self, dest: Reg, src1: Reg /*GPR or SP*/, src2: Reg); fn emit_adds(&mut self, dest: Reg, src1: Reg /*GPR or SP*/, src2: Reg); fn emit_sbc(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_sbcs(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_sub(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_subs(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_sdiv(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_udiv(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_asr(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_asrv(&mut self, dest: Reg, src1: Reg, src2: Reg); // Alias of ASR fn emit_lsl(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_lslv(&mut self, dest: Reg, src1: Reg, src2: Reg); // Alias of LSL fn emit_lsr(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_lsrv(&mut self, dest: Reg, src1: Reg, src2: Reg); // Alias of LSR fn emit_ror(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_bic(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_bics(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_and(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_ands(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_eon(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_eor(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_orn(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_orr(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_fadd(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_fdiv(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_fmax(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_fmaxnm(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_fmin(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_fminm(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_fmul(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_fnmul(&mut self, dest: Reg, src1: Reg, src2: Reg); fn emit_fsub(&mut self, dest: Reg, src1: Reg, src2: Reg); // Binary operations with shift fn emit_add_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8); fn emit_adds_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8); fn emit_sub_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8); fn 
emit_subs_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8);

    fn emit_bic_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8);
    fn emit_bics_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8);
    fn emit_and_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8);
    fn emit_ands_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8);
    fn emit_eon_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8);
    fn emit_eor_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8);
    fn emit_orn_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8);
    fn emit_orr_shift(&mut self, dest: Reg, src1: Reg, src2: Reg, shift: &str, amount: u8);

    // binary ops with immediates
    fn emit_add_imm(
        &mut self,
        dest: Reg, /*GPR or SP*/
        src1: Reg, /*GPR or SP*/
        src2: u16,
        shift: bool
    );
    fn emit_adds_imm(&mut self, dest: Reg, src1: Reg /*GPR or SP*/, src2: u16, shift: bool);
    fn emit_sub_imm(
        &mut self,
        dest: Reg, /*GPR or SP*/
        src1: Reg, /*GPR or SP*/
        src2: u16,
        shift: bool
    );
    fn emit_subs_imm(&mut self, dest: Reg, src1: Reg /*GPR or SP*/, src2: u16, shift: bool);
    fn emit_and_imm(&mut self, dest: Reg /*GPR or SP*/, src1: Reg, src2: u64);
    fn emit_ands_imm(&mut self, dest: Reg, src1: Reg, src2: u64);
    fn emit_eor_imm(&mut self, dest: Reg /*GPR or SP*/, src1: Reg, src2: u64);
    fn emit_orr_imm(&mut self, dest: Reg /*GPR or SP*/, src1: Reg, src2: u64);

    fn emit_asr_imm(&mut self, dest: Reg, src1: Reg, src2: u8);
    fn emit_lsr_imm(&mut self, dest: Reg, src1: Reg, src2: u8);
    fn emit_lsl_imm(&mut self, dest: Reg, src1: Reg, src2: u8);
    fn emit_ror_imm(&mut self, dest: Reg, src1: Reg, src2: u8);

    // ternary ops

    fn emit_madd(&mut self, dest: Reg, src1: Reg, src2: Reg, src3: Reg);
    fn emit_msub(&mut self, dest: Reg, src1: Reg, src2: Reg, src3: Reg);
    fn emit_smaddl(
        &mut self,
        dest: Reg, /*64*/
        src1: Reg, /*32*/
        src2: Reg, /*32*/
        src3: Reg /*64*/
    );
    fn emit_smsubl(
        &mut self,
        dest: Reg, /*64*/
        src1: Reg, /*32*/
        src2: Reg, /*32*/
        src3: Reg /*64*/
    );
    fn emit_umaddl(
        &mut self,
        dest: Reg, /*64*/
        src1: Reg, /*32*/
        src2: Reg, /*32*/
        src3: Reg /*64*/
    );
    fn emit_umsubl(
        &mut self,
        dest: Reg, /*64*/
        src1: Reg, /*32*/
        src2: Reg, /*32*/
        src3: Reg /*64*/
    );
    fn emit_fmadd(&mut self, dest: Reg, src1: Reg, src2: Reg, src3: Reg);
    fn emit_fmsub(&mut self, dest: Reg, src1: Reg, src2: Reg, src3: Reg);
    fn emit_fnmadd(&mut self, dest: Reg, src1: Reg, src2: Reg, src3: Reg);
    fn emit_fnmsub(&mut self, dest: Reg, src1: Reg, src2: Reg, src3: Reg);

    // Ternary ops with immediates
    fn emit_bfm(&mut self, dest: Reg, src1: Reg, src2: u8, src3: u8);
    fn emit_bfi(&mut self, dest: Reg, src1: Reg, src2: u8, src3: u8);
    fn emit_bfxil(&mut self, dest: Reg, src1: Reg, src2: u8, src3: u8);
    fn emit_ubfm(&mut self, dest: Reg, src1: Reg, src2: u8, src3: u8);
    fn emit_ubfx(&mut self, dest: Reg, src1: Reg, src2: u8, src3: u8);
    fn emit_ubfiz(&mut self, dest: Reg, src1: Reg, src2: u8, src3: u8);
    fn emit_sbfm(&mut self, dest: Reg, src1: Reg, src2: u8, src3: u8);
    fn emit_sbfx(&mut self, dest: Reg, src1: Reg, src2: u8, src3: u8);
    fn emit_sbfiz(&mut self, dest: Reg, src1: Reg, src2: u8, src3: u8);

    // Comparison (doesn't store a result, only updates flags)
    fn emit_tst(&mut self, src1: Reg, src2: Reg);
    fn emit_cmn(&mut self, src1: Reg, src2: Reg);
    fn emit_cmp(&mut self, src1: Reg, src2: Reg);
    fn emit_fcmp(&mut self, src1: Reg, src2: Reg);
    fn emit_fcmpe(&mut self, src1: Reg, src2: Reg);

    // Comparisons with extension
    fn emit_cmn_ext(&mut self, src1: Reg /*GPR or SP*/, src2: Reg, signed: bool, shift: u8);
    fn emit_cmp_ext(&mut self, src1: Reg /*GPR or SP*/, src2: Reg, signed: bool, shift: u8);

    // Comparisons with shift
    fn emit_tst_shift(&mut self, src1: Reg, src2: Reg, shift: &str, amount: u8);
    fn emit_cmn_shift(&mut self, src1: Reg, src2: Reg, shift: &str, amount: u8);
    fn emit_cmp_shift(&mut self, src1: Reg, src2: Reg, shift: &str, amount: u8);

    // Immediate comparisons
    fn emit_tst_imm(&mut self, src1: Reg, src2: u64);
    fn emit_cmn_imm(&mut self, src1: Reg /*GPR or SP*/, src2: u16, shift: bool);
    fn emit_cmp_imm(&mut self, src1: Reg /*GPR or SP*/, src2: u16, shift: bool);

    // Comparison against 0
    fn emit_fcmp_0(&mut self, src: Reg);
    fn emit_fcmpe_0(&mut self, src: Reg);

    // Conditional ops
    fn emit_cset(&mut self, dest: Reg, cond: &str);
    fn emit_csetm(&mut self, dest: Reg, cond: &str);

    // Conditional unary ops
    fn emit_cinc(&mut self, dest: Reg, src: Reg, cond: &str);
    fn emit_cneg(&mut self, dest: Reg, src: Reg, cond: &str);
    fn emit_cinv(&mut self, dest: Reg, src: Reg, cond: &str);

    // Conditional binary ops
    fn emit_csel(&mut self, dest: Reg, src1: Reg, src2: Reg, cond: &str);
    fn emit_csinc(&mut self, dest: Reg, src1: Reg, src2: Reg, cond: &str);
    fn emit_csinv(&mut self, dest: Reg, src1: Reg, src2: Reg, cond: &str);
    fn emit_csneg(&mut self, dest: Reg, src1: Reg, src2: Reg, cond: &str);
    fn emit_fcsel(&mut self, dest: Reg, src1: Reg, src2: Reg, cond: &str);

    // Conditional comparisons
    fn emit_ccmn(&mut self, src1: Reg, src2: Reg, flags: u8, cond: &str);
    fn emit_ccmp(&mut self, src1: Reg, src2: Reg, flags: u8, cond: &str);
    fn emit_fccmp(&mut self, src1: Reg, src2: Reg, flags: u8, cond: &str);
    fn emit_fccmpe(&mut self, src1: Reg, src2: Reg, flags: u8, cond: &str);

    // Conditional comparisons (with immediate)
    fn emit_ccmn_imm(&mut self, src1: Reg, src2: u8, flags: u8, cond: &str);
    fn emit_ccmp_imm(&mut self, src1: Reg, src2: u8, flags: u8, cond: &str);

    fn emit_bfc(&mut self, dest: Reg, src1: u8, src2: u8);
    fn emit_extr(&mut self, dest: Reg, src1: Reg, src2: Reg, src3: u8);

    // Synchronisation
    fn emit_dsb(&mut self, option: &str);
    fn emit_dmb(&mut self, option: &str);
    fn emit_isb(&mut self, option: &str);
    fn emit_clrex(&mut self);

    // Hint instructions
    fn emit_sevl(&mut self);
    fn emit_sev(&mut self);
    fn emit_wfe(&mut self);
    fn emit_wfi(&mut self);
    fn emit_yield(&mut self);
    fn emit_nop(&mut self);
    fn emit_hint(&mut self, val: u8);

    // Debug instructions
    fn emit_drps(&mut self);
    fn emit_dcps1(&mut self, val: u16);
    fn emit_dcps2(&mut self, val: u16);
    fn emit_dcps3(&mut self, val: u16);

    // System instructions
    fn emit_dc(&mut self, option: &str, src: Reg);
    fn emit_at(&mut self, option: &str, src: Reg);
    fn emit_ic(&mut self, option: &str, src: Reg);
    fn emit_tlbi(&mut self, option: &str, src: Reg);

    fn emit_sys(&mut self, imm1: u8, cn: u8, cm: u8, imm2: u8, src: Reg);
    fn emit_sysl(&mut self, dest: Reg, imm1: u8, cn: u8, cm: u8, imm2: u8);

    // Exception instructions (NOTE: these will alter the PC)
    fn emit_brk(&mut self, val: u16);
    fn emit_hlt(&mut self, val: u16);
    fn emit_hvc(&mut self, val: u16);
    fn emit_smc(&mut self, val: u16);
    fn emit_svc(&mut self, val: u16);
    fn emit_eret(&mut self);
}
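// Illustrative sketch (not part of the original trait): how a backend pass might
// drive a CodeGenerator to lay down a conventional AArch64 frame prologue. The
// helper name and the register arguments are assumptions -- a caller would pass
// the concrete frame pointer, link register and stack pointer values defined
// elsewhere in the backend.
#[allow(dead_code)]
fn emit_prologue(cg: &mut CodeGenerator, fp: Reg, lr: Reg, sp: Reg) {
    cg.add_cfi_startproc();
    cg.emit_push_pair(fp, lr, sp); // STP fp, lr, [sp, #-16]!
    cg.emit_mov(fp, sp); // establish the new frame pointer
    cg.emit_frame_grow(); // SUB sp, sp, #frame_size (size patched once known)
}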
Logchart.py
#!/usr/local/bin/python # coding: utf-8 # Logchart V1.0.0 for python3 # Log Chart # Copyright (C) 2017-2017 Kinghow - [email protected] # Git repository available at https://github.com/kinghows/Logchart import getopt import sys import configparser import os from pyecharts import options as opts from pyecharts.globals import ThemeType from pyecharts.charts import Page from pyecharts.charts import Tab from pyecharts.charts import Line def chart(chart_type,title,xlist,ylist,datas,style,themetype):
__name__=="__main__":
    config_file="Logchart.ini"
    logfile_directory = ""
    monitor_index =["cn_flush_bio","total write bio","total read bio"]
    mutli_chart_type ="tab"
    style = {'themetype':'LIGHT','is_smooth':True,'is_show':False,'opacity':0,'datazoom_opts':'inside','toolbox_opts_is_show':True}
    opt, args = getopt.getopt(sys.argv[1:], "d:m:")
    for o,v in opt:
        if o == "-d":
            logfile_directory = v
        elif o == "-m":
            monitor_index = v.split(",")
    if len(logfile_directory)==0 and os.path.exists(config_file):
        v =''
        config = configparser.ConfigParser()
        config.read(config_file)
        logfile_directory = config.get("set","logfile_directory")
        monitor_index = config.get("set","monitor_index").split(",")
        mutli_chart_type = config.get("set", "mutli_chart_type")
        try:
            v=config.get("set","chartstyle")
        except:
            pass
        else:
            if v != '':
                style = eval(v)
    # Resolve the theme by name instead of a long if/elif chain over every
    # ThemeType constant; unknown names fall back to WHITE (the original chain
    # left themetype undefined in that case, causing a NameError later).
    style_themetype = style.setdefault('themetype','WHITE')
    themetype = opts.InitOpts(theme=getattr(ThemeType, style_themetype, ThemeType.WHITE),
                              width=style.setdefault('Initopts_width',"1000px"),
                              height=style.setdefault('Initopts_height',"600px"))
    if os.path.exists(logfile_directory):
        filenames=os.listdir(logfile_directory)
        for logfilename in filenames:
            if "omv-debugonoff.log" in logfilename and ".html" not in logfilename and ".gz" not in logfilename:
                logfile = os.path.join(logfile_directory,logfilename)
                htmlfile = logfile + '.html'
                if not os.path.exists(htmlfile):
                    if mutli_chart_type=='page':
                        page = Page()
                    else:
                        page = Tab()
                    xlist=[]
                    datalist=[]
                    cn_flush_bio_p = 0
                    #title = logfilename[logfilename.index('omv-debugonoff.log-')+19:logfilename.index('omv-debugonoff.log-')+27]
                    srcFile = open(logfile, 'r')
                    lines = srcFile.readlines()
                    for line in lines:
                        x = line[11:19]
                        #keyv = eval(line[20:].replace("\\n"," "))
                        #proc_harx = keyv.setdefault("proc_harx",none)
                        #proc_hatx = keyv.setdefault("proc_hatx",none)
                        #proc_lfsm_monitor = keyv.setdefault("proc_lfsm_monitor",none)
                        xlist.append(x)
                        for index in monitor_index:
                            if index in line:
                                if index =="cn_flush_bio":
                                    tempv = line[line.index(index)+14:]
                                    if ":" in tempv :
                                        cn_flush_bio_c = int(tempv[:tempv.index(":")])
                                        if cn_flush_bio_p != 0 :
                                            keyv = cn_flush_bio_c-cn_flush_bio_p
                                        else:
                                            keyv = 0
                                    else:
                                        cn_flush_bio_c = 0
                                        keyv = 0
                                    cn_flush_bio_p = cn_flush_bio_c
                                else:
                                    tempv = line[line.index(index):]
                                    keyv = int(tempv[tempv.index("MB)")+4:tempv.index("mpage/perquery")-1])
                                data = []
                                data.append(x)
                                data.append(index)
                                data.append(keyv)
                                datalist.append(data)
                    srcFile.close()
                    for index in monitor_index:
                        if mutli_chart_type=='page':
                            page.add(chart('line',index,xlist,monitor_index,datalist,style,themetype))
                        else:
                            page.add(chart('line',index,xlist,monitor_index,datalist,style,themetype),index)
                    page.render(path=htmlfile)
    else:
        print('Please check '+logfile_directory+' exists!')
    zdict={}
    for i in range(len(ylist)):
        zdict[ylist[i]]=[]
    for row in datas:
        zdict[row[1]].append(str(row[2]))
    if chart_type == 'line':  # line chart
        if style.setdefault('toolbox_opts_is_show',False):
            toolbox_opts=opts.ToolboxOpts()
        else:
            toolbox_opts=None
        if style.setdefault('datazoom_opts',None)=='horizontal':
            datazoom_opts=opts.DataZoomOpts()
        elif style.setdefault('datazoom_opts',None)=='vertical':
            datazoom_opts=opts.DataZoomOpts(orient="vertical")
        elif style.setdefault('datazoom_opts',None)=='inside':
            datazoom_opts=opts.DataZoomOpts(type_="inside")
        else:
            datazoom_opts=None
        c = Line(themetype)
        c.set_global_opts(title_opts=opts.TitleOpts(title=title,pos_top=style.setdefault('title_pos_top',None),
                                                    pos_right=style.setdefault('title_pos_right',None)),
                          legend_opts=opts.LegendOpts(pos_top=style.setdefault('legend_pos_top',None),
                                                      pos_left=style.setdefault('legend_pos_left',None),
                                                      pos_right=style.setdefault('legend_pos_right',None)),
                          toolbox_opts=toolbox_opts,
                          datazoom_opts=datazoom_opts,
                          xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(rotate=style.setdefault('yaxis_opts_rotate',0),
                                                                                 formatter=style.setdefault('xaxis_opts_formatter',"{value}")),
                                                   axistick_opts=opts.AxisTickOpts(is_align_with_label=True),
                                                   is_scale=False,
                                                   boundary_gap=False,),
                          yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(formatter=style.setdefault('yaxis_opts_formatter',"{value}"))),
                          )
        c.add_xaxis(xlist)
        for i in range(len(ylist)):
            name = ylist[i]
            if title == name :
                c.add_yaxis(name, zdict[name],
                            markpoint_opts=opts.MarkPointOpts(data=[opts.MarkPointItem(type_=style.setdefault('type_',"max"))]),
                            is_smooth=style.setdefault('is_smooth',True),
                            label_opts=opts.LabelOpts(is_show=style.setdefault('is_show',False)),
                            areastyle_opts=opts.AreaStyleOpts(opacity=style.setdefault('opacity',0))
                            )
        return c
if
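# Usage sketch (illustrative, not part of the original script): rendering a single
# line chart from hand-made sample data. Every value below is a placeholder.
#
#   xlist = ["00:00:01", "00:00:02", "00:00:03"]
#   datas = [["00:00:01", "total write bio", 3],
#            ["00:00:02", "total write bio", 5],
#            ["00:00:03", "total write bio", 4]]
#   c = chart('line', "total write bio", xlist, ["total write bio"], datas,
#             {'themetype': 'LIGHT'}, opts.InitOpts(theme=ThemeType.LIGHT))
#   c.render(path="demo.html")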
popcount.go
package popcount

// pc[i] is the population count of i; the table is built once at package
// initialisation using the recurrence pc[i] = pc[i/2] + (i & 1).
var pc [256]byte = func() (pc [256]byte) {
	for i := range pc {
		pc[i] = pc[i/2] + byte(i&1)
	}
	return
}()

// PopCount returns the number of set bits in x by summing the table
// lookups for each of its eight bytes in a single expression.
func PopCount(x uint64) int {
	return int(pc[byte(x>>(0*8))] +
		pc[byte(x>>(1*8))] +
		pc[byte(x>>(2*8))] +
		pc[byte(x>>(3*8))] +
		pc[byte(x>>(4*8))] +
		pc[byte(x>>(5*8))] +
		pc[byte(x>>(6*8))] +
		pc[byte(x>>(7*8))])
}

// PopCount2 is the same table lookup written as a loop over the bytes.
func PopCount2(x uint64) int {
	var c byte
	for i := uint64(0); i < 8; i++ {
		c += pc[byte(x>>(i*8))]
	}
	return int(c)
}

// PopCount3 tests each bit individually, shifting x right one bit at a time.
func PopCount3(x uint64) int {
	c := 0
	for x > 0 {
		if x&1 == 1
		x >>= 1
	}
	return c
}

// PopCount4 uses Kernighan's trick: x & (x - 1) clears the lowest set
// bit, so the loop body runs once per set bit.
func PopCount4(x uint64) int {
	c := 0
	for x != 0 {
		x = x & (x - 1)
		c++
	}
	return c
}
{ c++ }
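// Illustrative cross-check (an addition, not part of the original file): all
// four implementations must agree for every input, e.g. each returns 3 for 11 (0b1011).
func popCountVariantsAgree(x uint64) bool {
	n := PopCount(x)
	return n == PopCount2(x) && n == PopCount3(x) && n == PopCount4(x)
}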
taskListSubtasksOptions.js
/* * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is * regenerated. */
* */ class TaskListSubtasksOptions { /** * Create a TaskListSubtasksOptions. * @member {string} [select] An OData $select clause. * @member {number} [timeout] The maximum time that the server can spend * processing the request, in seconds. The default is 30 seconds. Default * value: 30 . * @member {uuid} [clientRequestId] The caller-generated request identity, in * the form of a GUID with no decoration such as curly braces, e.g. * 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. * @member {boolean} [returnClientRequestId] Whether the server should return * the client-request-id in the response. Default value: false . * @member {date} [ocpDate] The time the request was issued. Client libraries * typically set this to the current system clock time; set it explicitly if * you are calling the REST API directly. */ constructor() { } /** * Defines the metadata of TaskListSubtasksOptions * * @returns {object} metadata of TaskListSubtasksOptions * */ mapper() { return { required: false, type: { name: 'Composite', className: 'TaskListSubtasksOptions', modelProperties: { select: { required: false, type: { name: 'String' } }, timeout: { required: false, defaultValue: 30, type: { name: 'Number' } }, clientRequestId: { required: false, type: { name: 'String' } }, returnClientRequestId: { required: false, defaultValue: false, type: { name: 'Boolean' } }, ocpDate: { required: false, type: { name: 'DateTimeRfc1123' } } } } }; } } module.exports = TaskListSubtasksOptions;
'use strict'; /** * Additional parameters for listSubtasks operation.
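// Usage sketch (illustrative; not part of the generated model). The property
// names follow the mapper above; the values shown are placeholders.
//
//   const options = new TaskListSubtasksOptions();
//   options.select = 'id,state';
//   options.timeout = 30;
//   options.returnClientRequestId = true;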
InfiniteImage.ts
import { List } from 'immutable' import { allPointsIncludingPoint, Point, PointFactory } from './Point.js'
function pointToKey(point: Point): string { return `${point.x},${point.y}` } export class InfiniteImage { #points = new Map<string, { point: Point; on: boolean }>() currentDefault = false constructor(startImage?: List<List<boolean>>) { if (startImage) { for (const [y, row] of startImage.entries()) { for (const [x, value] of row.entries()) { const point = PointFactory({ x, y }) if (value) { this.setPointOn(point) } else { this.setPointOff(point) } } } } } public get litPoints(): number { let count = 0 for (const point of this.#points.values()) { if (point.on) { count += 1 } } return count } public setPointOn(point: Point): void { this.setPoint(point, true) } public setPointOff(point: Point): void { this.setPoint(point, false) } public setPoint(point: Point, on: boolean): void { const key = pointToKey(point) this.#points.set(key, { point, on }) } public *everyInterestingPoint(): Generator<{ point: Point; surrounding: boolean[] }, void> { const currentPointMap = new Map(this.#points) const { currentDefault } = this const currentIteratedPoints = new Set<string>() for (const { point: knownTruePoint } of currentPointMap.values()) { for (const touchingPoint of allPointsIncludingPoint(knownTruePoint)) { if (!currentIteratedPoints.has(pointToKey(touchingPoint))) { yield { point: touchingPoint, surrounding: [...allPointsIncludingPoint(touchingPoint)].map((surroundingPoint) => { const pointValue = currentPointMap.get(pointToKey(surroundingPoint)) if (pointValue == null) { return currentDefault } return pointValue.on }), } currentIteratedPoints.add(pointToKey(touchingPoint)) } } } } public printImage(): void { let minX = Number.POSITIVE_INFINITY let maxX = Number.NEGATIVE_INFINITY let minY = Number.POSITIVE_INFINITY let maxY = Number.NEGATIVE_INFINITY for (const { point } of this.#points.values()) { minX = Math.min(minX, point.x) maxX = Math.max(maxX, point.x) minY = Math.min(minY, point.y) maxY = Math.max(maxY, point.y) } console.log(minX, maxX, minY, maxY) for (let y = minY; y <= maxY; y += 1) { for (let x = minX; x <= maxX; x += 1) { const output = this.#points.get(`${x},${y}`)?.on ? '#' : '.' process.stdout.write(output) } process.stdout.write('\n') } } }
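// Illustrative sketch (not part of the original class): one image-enhancement
// step driven by everyInterestingPoint(). `algorithm` is an assumed lookup table
// of 512 booleans indexed by the 3x3 neighbourhood read as a binary number, and
// the bit order assumes allPointsIncludingPoint yields the neighbourhood in
// reading order.
export function enhance(image: InfiniteImage, algorithm: boolean[]): InfiniteImage {
  const next = new InfiniteImage()
  for (const { point, surrounding } of image.everyInterestingPoint()) {
    const index = surrounding.reduce((acc, on) => (acc << 1) | (on ? 1 : 0), 0)
    next.setPoint(point, algorithm[index])
  }
  // Points far from any known point all share the same neighbourhood value,
  // so the infinite background flips according to entry 0 or 511.
  next.currentDefault = image.currentDefault ? algorithm[511] : algorithm[0]
  return next
}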
x86_64_unknown_freebsd.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use target::Target; pub fn target() -> Target
{ let mut base = super::freebsd_base::opts(); base.cpu = "x86-64".to_string(); base.pre_link_args.push("-m64".to_string()); Target { llvm_target: "x86_64-unknown-freebsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), arch: "x86_64".to_string(), target_os: "freebsd".to_string(), target_env: "".to_string(), options: base, } }
test_torch.py
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest

import pennylane as qml
from pennylane import numpy as np

pytestmark = pytest.mark.gpu

torch = pytest.importorskip("torch")


@pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda support")
class TestTorchDevice:
    def test_device_to_cuda(self):
        """Checks that the device executes with cuda if the input data is on cuda"""

        dev = qml.device("default.qubit.torch", wires=1)

        x = torch.tensor(0.1, requires_grad=True, device=torch.device("cuda"))

        with qml.tape.QuantumTape() as tape:
            qml.RX(x, wires=0)
            qml.expval(qml.PauliX(0))

        res = dev.execute(tape)

        assert res.is_cuda
        assert dev._torch_device == "cuda"

        res.backward()
        assert x.grad.is_cuda

    def test_mixed_devices(self):
        """Asserts that the device works with both cuda and cpu input data"""

        dev = qml.device("default.qubit.torch", wires=1)

        x = torch.tensor(0.1, requires_grad=True, device=torch.device("cuda"))
        y = torch.tensor(0.2, requires_grad=True, device=torch.device("cpu"))

        with qml.tape.QuantumTape() as tape:
            qml.RX(x, wires=0)
            qml.RY(y, wires=0)
            qml.expval(qml.PauliX(0))

        res = dev.execute(tape)

        assert res.is_cuda
        assert dev._torch_device == "cuda"

        res.backward()
        assert x.grad.is_cuda
        # check that this works
        ygrad = y.grad

    def test_matrix_input(self):
        """Tests that execution goes to the GPU for matrix-valued inputs."""

        dev = qml.device("default.qubit.torch", wires=1)

        U = torch.eye(2, requires_grad=False, device=torch.device("cuda"))

        with qml.tape.QuantumTape() as tape:
            qml.QubitUnitary(U, wires=0)
            qml.expval(qml.PauliZ(0))

        res = dev.execute(tape)
        assert res.is_cuda
        assert dev._torch_device == "cuda"

    def test_resets(self):
        """Asserts that the device reverts to cpu after execution on gpu"""

        dev = qml.device("default.qubit.torch", wires=1)

        x = torch.tensor(0.1, requires_grad=True, device=torch.device("cuda"))
        y = torch.tensor(0.2, requires_grad=True, device=torch.device("cpu"))

        with qml.tape.QuantumTape() as tape1:
            qml.RX(x, wires=0)
            qml.expval(qml.PauliZ(0))

        res1 = dev.execute(tape1)
        assert dev._torch_device == "cuda"
        assert res1.is_cuda

        with qml.tape.QuantumTape() as tape2:
            qml.RY(y, wires=0)
            qml.expval(qml.PauliZ(0))

        res2 = dev.execute(tape2)
        assert dev._torch_device == "cpu"
        assert not res2.is_cuda

    def test_integration(self):
        """Tests that cuda is supported when the device is created during qnode creation."""

        dev = qml.device("default.qubit", wires=1)

        x = torch.tensor(0.1, requires_grad=True, device=torch.device("cuda"))
        y = torch.tensor(0.2, requires_grad=True)

        @qml.qnode(dev, interface="torch", diff_method="backprop")
        def circ(x, y):
            qml.RX(x, wires=0)
            qml.RY(y, wires=0)
            return qml.expval(qml.PauliZ(0))

        res = circ(x, y)
        assert res.is_cuda
        assert circ.device._torch_device == "cuda"

        res.backward()
        assert x.grad.is_cuda

    @pytest.mark.parametrize("init_device, par_device", [("cpu", "cuda"), ("cuda", "cpu")])
    def test_different_devices_creation_and_parameters_warn(self, init_device, par_device):
        """Test that a warning is raised if the Torch device specified on PennyLane
        device creation differs from the Torch device of gate
parameters. """ dev = qml.device("default.qubit.torch", wires=1, torch_device=init_device) p = torch.tensor(0.543, dtype=torch.float64, device=par_device) @qml.qnode(dev, interface="torch") def circuit(x): qml.RX(x, wires=0)
UserWarning, match=f"Torch device {init_device} specified upon PennyLane device creation does not match", ): circuit(p) @pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda support") class TestqnnTorchLayer: def test_torch_device_cuda_if_tensors_on_cuda(self): """Test that if any tensor passed to operators is on the GPU then CUDA is set internally as a device option for 'default.qubit.torch'.""" n_qubits = 3 n_layers = 1 dev = qml.device("default.qubit", wires=n_qubits) @qml.qnode(dev) def circuit(inputs, weights): qml.templates.AngleEmbedding(inputs, wires=range(n_qubits)) qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits)) return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)] weight_shapes = {"weights": (n_layers, n_qubits)} qlayer = qml.qnn.TorchLayer(circuit, weight_shapes) x = torch.rand((5, n_qubits), dtype=torch.float64).to(torch.device("cuda")) res = qlayer(x) assert circuit.device.short_name == "default.qubit.torch" assert circuit.device._torch_device == "cuda" assert res.is_cuda loss = torch.sum(res).squeeze() loss.backward() assert loss.is_cuda def test_qnn_torchlayer(self): """Test if TorchLayer can be run on GPU""" n_qubits = 4 dev = qml.device("default.qubit", wires=n_qubits) @qml.qnode(dev, interface="torch") def circuit(inputs, weights): qml.templates.AngleEmbedding(inputs, wires=range(n_qubits)) qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits)) return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)] n_layers = 1 weight_shapes = {"weights": (n_layers, n_qubits)} qlayer = qml.qnn.TorchLayer(circuit, weight_shapes) x = torch.rand((5, n_qubits), dtype=torch.float64).to(torch.device("cuda")) res = qlayer(x) assert res.is_cuda loss = torch.sum(res).squeeze() loss.backward() assert loss.is_cuda
return qml.expval(qml.PauliY(0)) with pytest.warns(
rfc2136.go
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rfc2136

import (
	"context"
	"fmt"
	"net"
	"strconv"
	"strings"
	"time"

	"github.com/bodgit/tsig"
	"github.com/bodgit/tsig/gss"
	"github.com/miekg/dns"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"

	"sigs.k8s.io/external-dns/endpoint"
	"sigs.k8s.io/external-dns/plan"
	"sigs.k8s.io/external-dns/provider"
)

const (
	// maximum size of a UDP transport message in DNS protocol
	udpMaxMsgSize = 512

	// maximum time DNS client can be off from server for an update to succeed
	clockSkew = 300
)

// rfc2136 provider type
type rfc2136Provider struct {
	provider.BaseProvider
	nameserver      string
	zoneName        string
	tsigKeyName     string
	tsigSecret      string
	tsigSecretAlg   string
	insecure        bool
	axfr            bool
	minTTL          time.Duration
	batchChangeSize int

	// options specific to rfc3645 gss-tsig support
	gssTsig      bool
	krb5Username string
	krb5Password string
	krb5Realm    string

	// only consider hosted zones managing domains ending in this suffix
	domainFilter endpoint.DomainFilter
	dryRun       bool
	actions      rfc2136Actions
}

var (
	// Map of supported TSIG algorithms
	tsigAlgs = map[string]string{
		"hmac-md5":    dns.HmacMD5,
		"hmac-sha1":   dns.HmacSHA1,
		"hmac-sha224": dns.HmacSHA224,
		"hmac-sha256": dns.HmacSHA256,
		"hmac-sha384": dns.HmacSHA384,
		"hmac-sha512": dns.HmacSHA512,
	}
)

type rfc2136Actions interface {
	SendMessage(msg *dns.Msg) error
	IncomeTransfer(m *dns.Msg, a string) (env chan *dns.Envelope, err error)
}

// NewRfc2136Provider is a factory function for rfc2136 providers
func NewRfc2136Provider(host string, port int, zoneName string, insecure bool, keyName string, secret string, secretAlg string, axfr bool, domainFilter endpoint.DomainFilter, dryRun bool, minTTL time.Duration, gssTsig bool, krb5Username string, krb5Password string, krb5Realm string, batchChangeSize int, actions rfc2136Actions) (provider.Provider, error) {
	secretAlgChecked, ok := tsigAlgs[secretAlg]
	if !ok && !insecure && !gssTsig {
		return nil, errors.Errorf("%s is not a supported TSIG algorithm", secretAlg)
	}

	if krb5Realm == "" {
		krb5Realm = strings.ToUpper(zoneName)
	}

	r := &rfc2136Provider{
		nameserver:      net.JoinHostPort(host, strconv.Itoa(port)),
		zoneName:        dns.Fqdn(zoneName),
		insecure:        insecure,
		gssTsig:         gssTsig,
		krb5Username:    krb5Username,
		krb5Password:    krb5Password,
		krb5Realm:       strings.ToUpper(krb5Realm),
		domainFilter:    domainFilter,
		dryRun:          dryRun,
		axfr:            axfr,
		minTTL:          minTTL,
		batchChangeSize: batchChangeSize,
	}
	if actions != nil {
		r.actions = actions
	} else {
		r.actions = r
	}

	if !insecure {
		r.tsigKeyName = dns.Fqdn(keyName)
		r.tsigSecret = secret
		r.tsigSecretAlg = secretAlgChecked
	}

	log.Infof("Configured RFC2136 with zone '%s' and nameserver '%s'", r.zoneName, r.nameserver)

	return r, nil
}

// KeyData returns the TKEY name and TSIG handle to use for follow-on actions with a secure connection
func (r rfc2136Provider) KeyData() (keyName string, handle *gss.Client, err error) {
	handle, err = gss.NewClient(new(dns.Client))
	if err != nil {
		return keyName, handle, err
	}

	rawHost, _, err := net.SplitHostPort(r.nameserver)
	if
err != nil { return keyName, handle, err } keyName, _, err = handle.NegotiateContextWithCredentials(rawHost, r.krb5Realm, r.krb5Username, r.krb5Password) return keyName, handle, err } // Records returns the list of records. func (r rfc2136Provider) Records(ctx context.Context) ([]*endpoint.Endpoint, error) { rrs, err := r.List() if err != nil { return nil, err } var eps []*endpoint.Endpoint OuterLoop: for _, rr := range rrs { log.Debugf("Record=%s", rr) if rr.Header().Class != dns.ClassINET { continue } rrFqdn := rr.Header().Name rrTTL := endpoint.TTL(rr.Header().Ttl) var rrType string var rrValues []string switch rr.Header().Rrtype { case dns.TypeCNAME: rrValues = []string{rr.(*dns.CNAME).Target} rrType = "CNAME" case dns.TypeA: rrValues = []string{rr.(*dns.A).A.String()} rrType = "A" case dns.TypeAAAA: rrValues = []string{rr.(*dns.AAAA).AAAA.String()} rrType = "AAAA" case dns.TypeTXT: rrValues = (rr.(*dns.TXT).Txt) rrType = "TXT" case dns.TypeNS: rrValues = []string{rr.(*dns.NS).Ns} rrType = "NS" default: continue // Unhandled record type } for idx, existingEndpoint := range eps { if existingEndpoint.DNSName == strings.TrimSuffix(rrFqdn, ".") && existingEndpoint.RecordType == rrType { eps[idx].Targets = append(eps[idx].Targets, rrValues...) continue OuterLoop } } ep := endpoint.NewEndpointWithTTL( rrFqdn, rrType, rrTTL, rrValues..., ) eps = append(eps, ep) } return eps, nil } func (r rfc2136Provider) IncomeTransfer(m *dns.Msg, a string) (env chan *dns.Envelope, err error) { t := new(dns.Transfer) if !r.insecure && !r.gssTsig { t.TsigSecret = map[string]string{r.tsigKeyName: r.tsigSecret} } return t.In(m, r.nameserver) } func (r rfc2136Provider) List() ([]dns.RR, error) { if !r.axfr { log.Debug("axfr is disabled") return make([]dns.RR, 0), nil } log.Debugf("Fetching records for '%s'", r.zoneName) m := new(dns.Msg) m.SetAxfr(r.zoneName) if !r.insecure && !r.gssTsig { m.SetTsig(r.tsigKeyName, r.tsigSecretAlg, clockSkew, time.Now().Unix()) } env, err := r.actions.IncomeTransfer(m, r.nameserver) if err != nil { return nil, fmt.Errorf("failed to fetch records via AXFR: %v", err) } records := make([]dns.RR, 0) for e := range env { if e.Error != nil { if e.Error == dns.ErrSoa { log.Error("AXFR error: unexpected response received from the server") } else { log.Errorf("AXFR error: %v", e.Error) } continue } records = append(records, e.RR...) } return records, nil } // ApplyChanges applies a given set of changes in a given zone. 
func (r rfc2136Provider) ApplyChanges(ctx context.Context, changes *plan.Changes) error { log.Debugf("ApplyChanges (Create: %d, UpdateOld: %d, UpdateNew: %d, Delete: %d)", len(changes.Create), len(changes.UpdateOld), len(changes.UpdateNew), len(changes.Delete)) var errors []error for c, chunk := range chunkBy(changes.Create, r.batchChangeSize) { log.Debugf("Processing batch %d of create changes", c) m := new(dns.Msg) m.SetUpdate(r.zoneName) for _, ep := range chunk { if !r.domainFilter.Match(ep.DNSName) { log.Debugf("Skipping record %s because it was filtered out by the specified --domain-filter", ep.DNSName) continue } r.AddRecord(m, ep) } // only send if there are records available if len(m.Ns) > 0 { err := r.actions.SendMessage(m) if err != nil { log.Errorf("RFC2136 update failed: %v", err) errors = append(errors, err) continue } } } for c, chunk := range chunkBy(changes.UpdateNew, r.batchChangeSize) { log.Debugf("Processing batch %d of update changes", c) m := new(dns.Msg) m.SetUpdate(r.zoneName) for i, ep := range chunk { if !r.domainFilter.Match(ep.DNSName) { log.Debugf("Skipping record %s because it was filtered out by the specified --domain-filter", ep.DNSName) continue } r.UpdateRecord(m, changes.UpdateOld[i], ep) } // only send if there are records available if len(m.Ns) > 0 { err := r.actions.SendMessage(m) if err != nil { log.Errorf("RFC2136 update failed: %v", err) errors = append(errors, err) continue } } } for c, chunk := range chunkBy(changes.Delete, r.batchChangeSize) { log.Debugf("Processing batch %d of delete changes", c) m := new(dns.Msg) m.SetUpdate(r.zoneName) for _, ep := range chunk { if !r.domainFilter.Match(ep.DNSName) { log.Debugf("Skipping record %s because it was filtered out by the specified --domain-filter", ep.DNSName) continue } r.RemoveRecord(m, ep) } // only send if there are records available if len(m.Ns) > 0 { err := r.actions.SendMessage(m) if err != nil { log.Errorf("RFC2136 update failed: %v", err) errors = append(errors, err) continue } } } if len(errors) > 0 { return fmt.Errorf("RFC2136 had errors in one or more of its batches: %v", errors) } return nil } func (r rfc2136Provider) UpdateRecord(m *dns.Msg, oldEp *endpoint.Endpoint, newEp *endpoint.Endpoint) error { err := r.RemoveRecord(m, oldEp) if err != nil { return err } return r.AddRecord(m, newEp) }
var ttl = int64(r.minTTL.Seconds()) if ep.RecordTTL.IsConfigured() && int64(ep.RecordTTL) > ttl { ttl = int64(ep.RecordTTL) } for _, target := range ep.Targets { newRR := fmt.Sprintf("%s %d %s %s", ep.DNSName, ttl, ep.RecordType, target) log.Infof("Adding RR: %s", newRR) rr, err := dns.NewRR(newRR) if err != nil { return fmt.Errorf("failed to build RR: %v", err) } m.Insert([]dns.RR{rr}) } return nil } func (r rfc2136Provider) RemoveRecord(m *dns.Msg, ep *endpoint.Endpoint) error { log.Debugf("RemoveRecord.ep=%s", ep) for _, target := range ep.Targets { newRR := fmt.Sprintf("%s %d %s %s", ep.DNSName, ep.RecordTTL, ep.RecordType, target) log.Infof("Removing RR: %s", newRR) rr, err := dns.NewRR(newRR) if err != nil { return fmt.Errorf("failed to build RR: %v", err) } m.Remove([]dns.RR{rr}) } return nil } func (r rfc2136Provider) SendMessage(msg *dns.Msg) error { if r.dryRun { log.Debugf("SendMessage.skipped") return nil } log.Debugf("SendMessage") c := new(dns.Client) c.SingleInflight = true if !r.insecure { if r.gssTsig { keyName, handle, err := r.KeyData() if err != nil { return err } defer handle.Close() defer handle.DeleteContext(keyName) c.TsigProvider = handle msg.SetTsig(keyName, tsig.GSS, clockSkew, time.Now().Unix()) } else { c.TsigProvider = tsig.HMAC{r.tsigKeyName: r.tsigSecret} msg.SetTsig(r.tsigKeyName, r.tsigSecretAlg, clockSkew, time.Now().Unix()) } } if msg.Len() > udpMaxMsgSize { c.Net = "tcp" } resp, _, err := c.Exchange(msg, r.nameserver) if err != nil { if resp != nil && resp.Rcode != dns.RcodeSuccess { log.Infof("error in dns.Client.Exchange: %s", err) return err } log.Warnf("warn in dns.Client.Exchange: %s", err) } if resp != nil && resp.Rcode != dns.RcodeSuccess { log.Infof("Bad dns.Client.Exchange response: %s", resp) return fmt.Errorf("bad return code: %s", dns.RcodeToString[resp.Rcode]) } log.Debugf("SendMessage.success") return nil } func chunkBy(slice []*endpoint.Endpoint, chunkSize int) [][]*endpoint.Endpoint { var chunks [][]*endpoint.Endpoint for i := 0; i < len(slice); i += chunkSize { end := i + chunkSize if end > len(slice) { end = len(slice) } chunks = append(chunks, slice[i:end]) } return chunks }
func (r rfc2136Provider) AddRecord(m *dns.Msg, ep *endpoint.Endpoint) error { log.Debugf("AddRecord.ep=%s", ep)
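// Usage sketch (illustrative; every value below is a placeholder, not a default):
//
//	p, err := NewRfc2136Provider("ns1.example.com", 53, "example.com.", false,
//		"externaldns-key.", "BASE64SECRET", "hmac-sha256", true,
//		endpoint.NewDomainFilter([]string{"example.com"}), false,
//		time.Minute, false, "", "", "", 50, nil)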
config.go
package main import ( "os" ) type config struct { datastoreType string projectID string googleCloudStorageBucketName string } func newConfig() config {
c.datastoreType = getEnv("DATASTORE", "local") c.projectID = getEnv("PROJECT_ID", "twelve-factor-app") c.googleCloudStorageBucketName = getEnv("GOOGLE_CLOUD_BUCKET_NAME", "") return c } func getEnv(key, fallback string) string { if value, ok := os.LookupEnv(key); ok { return value } return fallback }
c := config{}
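// Usage sketch (illustrative): resolve the configuration, honouring any
// DATASTORE / PROJECT_ID / GOOGLE_CLOUD_BUCKET_NAME environment overrides.
//
//	cfg := newConfig()
//	fmt.Println(cfg.datastoreType, cfg.projectID, cfg.googleCloudStorageBucketName)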
tbpmr.rs
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::TBPMR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get() } } #[doc = r" Writes to the register"] #[inline] pub fn
<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct RESERVED8R { bits: u32, } impl RESERVED8R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } } #[doc = r" Value of the field"] pub struct TBPSMRR { bits: u8, } impl TBPSMRR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Proxy"] pub struct _TBPSMRW<'a> { w: &'a mut W, } impl<'a> _TBPSMRW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 255; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 8:31 - Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."] #[inline] pub fn reserved8(&self) -> RESERVED8R { let bits = { const MASK: u32 = 16777215; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) as u32 }; RESERVED8R { bits } } #[doc = "Bits 0:7 - GPT Timer B Pre-scale Match Register. In 16 bit mode this field holds bits 23 to 16."] #[inline] pub fn tbpsmr(&self) -> TBPSMRR { let bits = { const MASK: u8 = 255; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }; TBPSMRR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:7 - GPT Timer B Pre-scale Match Register. In 16 bit mode this field holds bits 23 to 16."] #[inline] pub fn tbpsmr(&mut self) -> _TBPSMRW { _TBPSMRW { w: self } } }
write
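// Usage sketch (illustrative; `gpt0` stands for a peripheral instance obtained
// elsewhere, e.g. from the device crate's `Peripherals` struct):
//
//     // set the 8-bit prescale match value, then read it back
//     gpt0.tbpmr.write(|w| unsafe { w.tbpsmr().bits(0x2e) });
//     let match_val = gpt0.tbpmr.read().tbpsmr().bits();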
panic_trap.rs
use std::io::Read; use std::ops::Deref; use std::panic; use std::sync::{Arc, Mutex}; use shh::{stderr, stdout}; #[derive(Clone)] pub struct PanicDetails { pub payload: String, pub location: String, } impl PanicDetails { fn from_panic_info(info: &panic::PanicInfo) -> PanicDetails { let payload = if let Some(s) = info.payload().downcast_ref::<String>() { s.clone() } else if let Some(s) = info.payload().downcast_ref::<&str>() { s.deref().to_owned() } else { "Opaque panic payload".to_owned() }; let location = info .location() .map(|loc| format!("{}:{}:{}", loc.file(), loc.line(), loc.column())) .unwrap_or_else(|| "Unknown panic location".to_owned()); PanicDetails { payload, location } } } pub struct PanicTrap<T> { pub result: Result<T, PanicDetails>, pub stdout: Vec<u8>, pub stderr: Vec<u8>, } impl<T> PanicTrap<T> { pub fn run<F: FnOnce() -> T>(quiet: bool, f: F) -> PanicTrap<T>
fn run_quietly<F: FnOnce() -> T>(f: F) -> PanicTrap<T> { let mut stdout = stdout().expect("Failed to capture stdout"); let mut stderr = stderr().expect("Failed to capture stderr"); let mut trap = PanicTrap::run_loudly(f); stdout.read_to_end(&mut trap.stdout).unwrap(); stderr.read_to_end(&mut trap.stderr).unwrap(); trap } fn run_loudly<F: FnOnce() -> T>(f: F) -> PanicTrap<T> { let last_panic = Arc::new(Mutex::new(None)); panic::set_hook({ let last_panic = last_panic.clone(); Box::new(move |info| { *last_panic.lock().expect("Last panic mutex poisoned") = Some(PanicDetails::from_panic_info(info)); }) }); let result = panic::catch_unwind(panic::AssertUnwindSafe(f)); let _ = panic::take_hook(); PanicTrap { result: result.map_err(|_| { last_panic .lock() .expect("Last panic mutex poisoned") .take() .expect("Panic occurred but no panic details were set") }), stdout: Vec::new(), stderr: Vec::new(), } } }
{ if quiet { PanicTrap::run_quietly(f) } else { PanicTrap::run_loudly(f) } }
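// Usage sketch (illustrative): trap a panic raised inside a closure, capturing
// its output quietly, then inspect what happened and where.
//
//     let trap = PanicTrap::run(true, || panic!("boom"));
//     if let Err(details) = trap.result {
//         eprintln!("panicked with '{}' at {}", details.payload, details.location);
//     }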
uaagent.py
# -*- coding: utf-8 -*-
import random
import logging

from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware

logger = logging.getLogger(__name__)


class RotateUserAgentMiddleware(UserAgentMiddleware):
    """Randomly rotate user agents based on a list of predefined ones.

    One of the strategies to avoid getting banned: use a user-agent pool.
    Note: the corresponding settings must be configured in settings.py.

    A better approach is to use:
        pip install scrapy-fake-useragent

        DOWNLOADER_MIDDLEWARES = {
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
            'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 400,
        }
    """

    def __init__(self, agents):
        super(RotateUserAgentMiddleware, self).__init__()
        self.agents = agents

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings.getlist('USER_AGENTS'))

    def process_request(self, request, spider):
        ua = random.choice(self.a
request.headers.setdefault('User-Agent', ua) logger.debug('Current UserAgent: ' + ua)
gents)
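# Example settings.py wiring for this middleware (illustrative; the module path
# 'myproject.middlewares.uaagent' and the agent strings are assumptions):
#
#   DOWNLOADER_MIDDLEWARES = {
#       'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
#       'myproject.middlewares.uaagent.RotateUserAgentMiddleware': 400,
#   }
#   USER_AGENTS = [
#       'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ...',
#       'Mozilla/5.0 (X11; Linux x86_64) ...',
#   ]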
classes_11.js
mod.rs
//! Vendor-specific events for BlueNRG controllers. //! //! The BlueNRG implementation defines several additional events that are packaged as //! vendor-specific events by the Bluetooth HCI. This module defines those events and functions to //! deserialize buffers into them. extern crate bluetooth_hci as hci; pub mod command; use byteorder::{ByteOrder, LittleEndian}; use core::cmp::PartialEq; use core::convert::{TryFrom, TryInto}; use core::fmt::{Debug, Formatter, Result as FmtResult}; use core::mem; use core::time::Duration; pub use hci::types::{ConnectionInterval, ConnectionIntervalError}; pub use hci::{BdAddr, BdAddrType, ConnectionHandle}; /// Vendor-specific events for the BlueNRG-MS controllers. #[allow(clippy::large_enum_variant)] #[derive(Clone, Copy, Debug)] pub enum BlueNRGEvent { /// When the BlueNRG-MS firmware is started normally, it gives this event to the user to /// indicate the system has started. HalInitialized(ResetReason), /// If the host fails to read events from the controller quickly enough, the controller will /// generate this event. This event is never lost; it is inserted as soon as space is available /// in the Tx queue. #[cfg(feature = "ms")] EventsLost(EventFlags), /// The fault data event is automatically sent after the /// [HalInitialized](BlueNRGEvent::HalInitialized) event in case of [NMI or Hard /// fault](ResetReason::Crash). #[cfg(feature = "ms")] CrashReport(FaultData), /// This event is generated by the controller when the limited discoverable mode ends due to /// timeout (180 seconds). GapLimitedDiscoverableTimeout, /// This event is generated when the pairing process has completed successfully or a pairing /// procedure timeout has occurred or the pairing has failed. This is to notify the application /// that we have paired with a remote device so that it can take further actions or to notify /// that a timeout has occurred so that the upper layer can decide to disconnect the link. GapPairingComplete(GapPairingComplete), /// This event is generated by the Security manager to the application when a pass key is /// required for pairing. When this event is received, the application has to respond with the /// `gap_pass_key_response` command. GapPassKeyRequest(ConnectionHandle), /// This event is generated by the Security manager to the application when the application has /// set that authorization is required for reading/writing of attributes. This event will be /// generated as soon as the pairing is complete. When this event is received, /// `gap_authorization_response` command should be used by the application. GapAuthorizationRequest(ConnectionHandle), /// This event is generated when the peripheral security request is successfully sent to the /// central device. GapPeripheralSecurityInitiated, /// This event is generated on the peripheral when a `gap_peripheral_security_request` is called /// to reestablish the bond with the central device but the central device has lost the /// bond. When this event is received, the upper layer has to issue the command /// `gap_allow_rebond` in order to allow the peripheral to continue the pairing process with the /// central device. On the central device, this event is raised when `gap_send_pairing_request` /// is called to reestablish a bond with a peripheral but the peripheral has lost the bond. In /// order to create a new bond the central device has to launch `gap_send_pairing_request` with /// `force_rebond` set to `true`. 
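    ///
    /// # Example
    ///
    /// A minimal handling sketch, assuming an `event` already deserialized by this module and a
    /// `controller` exposing the `gap_allow_rebond` command mentioned above as `allow_rebond`:
    ///
    /// ```ignore
    /// if let BlueNRGEvent::GapBondLost = event {
    ///     // Let the peer continue pairing even though the bond was lost.
    ///     controller.allow_rebond()?;
    /// }
    /// ```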
    GapBondLost,

    /// The event is given by the GAP layer to the upper layers when a device is discovered during
    /// scanning as a consequence of one of the GAP procedures started by the upper layers.
    GapDeviceFound(GapDeviceFound),

    /// This event is sent by the GAP to the upper layers when a procedure previously started has
    /// been terminated by the upper layer or has completed for any other reason.
    GapProcedureComplete(GapProcedureComplete),

    /// This event is sent only by a privacy enabled peripheral. The event is sent to the upper
    /// layers when the peripheral is unsuccessful in resolving the resolvable address of the peer
    /// device after connecting to it.
    #[cfg(feature = "ms")]
    GapAddressNotResolved(ConnectionHandle),

    /// This event is generated when the reconnection address is generated during the general
    /// connection establishment procedure. The same address is set to the peer device also as a
    /// part of the general connection establishment procedure. In order to make use of the
    /// reconnection address the next time while connecting to the bonded peripheral, the
    /// application needs to set its own address as well as the peer address to which it wants to
    /// connect to this reconnection address.
    #[cfg(not(feature = "ms"))]
    GapReconnectionAddress(BdAddr),

    /// This event is generated when the central device responds to the L2CAP connection update
    /// request packet. For more info see
    /// [ConnectionParameterUpdateResponse](crate::l2cap::ConnectionParameterUpdateResponse)
    /// and CommandReject in Bluetooth Core v4.0 spec.
    L2CapConnectionUpdateResponse(L2CapConnectionUpdateResponse),

    /// This event is generated when the central device does not respond to the connection update
    /// request within 30 seconds.
    L2CapProcedureTimeout(ConnectionHandle),

    /// The event is given by the L2CAP layer when a connection update request is received from the
    /// peripheral. The application has to respond by calling
    /// [`l2cap_connection_parameter_update_response`](crate::l2cap::Commands::connection_parameter_update_response).
    L2CapConnectionUpdateRequest(L2CapConnectionUpdateRequest),

    /// This event is generated to the application by the ATT server when a client modifies any
    /// attribute on the server, as a consequence of one of the following ATT procedures:
    /// - write without response
    /// - signed write without response
    /// - write characteristic value
    /// - write long characteristic value
    /// - reliable write
    GattAttributeModified(GattAttributeModified),

    /// This event is generated by the client/server to the application on a GATT procedure
    /// timeout (30 seconds).
    GattProcedureTimeout(ConnectionHandle),

    /// This event is generated in response to an Exchange MTU request.
    AttExchangeMtuResponse(AttExchangeMtuResponse),

    /// This event is generated in response to a Find Information Request. See Find Information
    /// Response in Bluetooth Core v4.0 spec.
    AttFindInformationResponse(AttFindInformationResponse),

    /// This event is generated in response to a Find By Type Value Request.
    AttFindByTypeValueResponse(AttFindByTypeValueResponse),

    /// This event is generated in response to a Read by Type Request.
    AttReadByTypeResponse(AttReadByTypeResponse),

    /// This event is generated in response to a Read Request.
    AttReadResponse(AttReadResponse),

    /// This event is generated in response to a Read Blob Request. The value in the response is the
    /// partial value starting from the offset in the request. See the Bluetooth Core v4.1 spec, Vol
    /// 3, section 3.4.4.5 and 3.4.4.6.
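    ///
    /// A reassembly sketch for long reads, assuming `response.data()` is the accessor on
    /// [`AttReadResponse`] that yields the valid bytes (the accessor name is an assumption; see
    /// the struct definition later in this module):
    ///
    /// ```ignore
    /// if let BlueNRGEvent::AttReadBlobResponse(ref response) = event {
    ///     // Append this chunk at the offset used in the preceding Read Blob Request.
    ///     long_value.extend_from_slice(response.data());
    /// }
    /// ```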
    AttReadBlobResponse(AttReadResponse),

    /// This event is generated in response to a Read Multiple Request. The value in the response is
    /// the set of values requested from the request. See the Bluetooth Core v4.1 spec, Vol 3,
    /// section 3.4.4.7 and 3.4.4.8.
    AttReadMultipleResponse(AttReadResponse),

    /// This event is generated in response to a Read By Group Type Request. See the Bluetooth Core
    /// v4.1 spec, Vol 3, section 3.4.4.9 and 3.4.4.10.
    AttReadByGroupTypeResponse(AttReadByGroupTypeResponse),

    /// This event is generated in response to a Prepare Write Request. See the Bluetooth Core v4.1
    /// spec, Vol 3, Part F, section 3.4.6.1 and 3.4.6.2.
    AttPrepareWriteResponse(AttPrepareWriteResponse),

    /// This event is generated in response to an Execute Write Request. See the Bluetooth Core v4.1
    /// spec, Vol 3, Part F, section 3.4.6.3 and 3.4.6.4.
    AttExecuteWriteResponse(ConnectionHandle),
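    // NOTE (illustrative): an indication delivered through `GattIndication` below must be
    // confirmed by the application. A sketch, assuming a `controller` exposing the GATT
    // confirmation command (the method name mirrors the underlying ACI_GATT_CONFIRM_INDICATION
    // and is an assumption, as is taking the connection handle from the event):
    //
    //     if let BlueNRGEvent::GattIndication(ref value) = event {
    //         controller.confirm_indication(value.conn_handle)?;
    //     }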
/// This event is generated when an indication is received from the server. GattIndication(AttributeValue), /// This event is generated when an notification is received from the server. GattNotification(AttributeValue), /// This event is generated when a GATT client procedure completes either with error or /// successfully. GattProcedureComplete(GattProcedureComplete), /// This event is generated when an Error Response is received from the server. The error /// response can be given by the server at the end of one of the GATT discovery procedures. This /// does not mean that the procedure ended with an error, but this error event is part of the /// procedure itself. AttErrorResponse(AttErrorResponse), /// This event can be generated during a "Discover Characteristics by UUID" procedure or a "Read /// using Characteristic UUID" procedure. The attribute value will be a service declaration as /// defined in Bluetooth Core v4.0 spec, Vol 3, Part G, section 3.3.1), when a "Discover /// Characteristics By UUID" has been started. It will be the value of the Characteristic if a /// "Read using Characteristic UUID" has been performed. /// /// See the Bluetooth Core v4.1 spec, Vol 3, Part G, section 4.6.2 (discover characteristics by /// UUID), and section 4.8.2 (read using characteristic using UUID). GattDiscoverOrReadCharacteristicByUuidResponse(AttributeValue), /// This event is given to the application when a write request, write command or signed write /// command is received by the server from the client. This event will be given to the /// application only if the event bit for this event generation is set when the characteristic /// was added. When this event is received, the application has to check whether the value being /// requested for write is allowed to be written and respond with a GATT Write Response. If the /// write is rejected by the application, then the value of the attribute will not be /// modified. In case of a write request, an error response will be sent to the client, with the /// error code as specified by the application. In case of write/signed write commands, no /// response is sent to the client but the attribute is not modified. /// /// See the Bluetooth Core v4.1 spec, Vol 3, Part F, section 3.4.5. AttWritePermitRequest(AttributeValue), /// This event is given to the application when a read request or read blob request is received /// by the server from the client. This event will be given to the application only if the event /// bit for this event generation is set when the characteristic was added. On receiving this /// event, the application can update the value of the handle if it desires and when done it has /// to use the [`allow_read`](crate::gatt::Commands::allow_read) command to indicate to the /// stack that it can send the response to the client. /// /// See the Bluetooth Core v4.1 spec, Vol 3, Part F, section 3.4.4. AttReadPermitRequest(AttReadPermitRequest), /// This event is given to the application when a read multiple request or read by type request /// is received by the server from the client. This event will be given to the application only /// if the event bit for this event generation is set when the characteristic was added. On /// receiving this event, the application can update the values of the handles if it desires and /// when done it has to send the [`allow_read`](crate::gatt::Commands::allow_read) command to /// indicate to the stack that it can send the response to the client. 
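    ///
    /// A response sketch, assuming a `controller` exposing the
    /// [`allow_read`](crate::gatt::Commands::allow_read) command linked above and a `conn_handle`
    /// taken from this event:
    ///
    /// ```ignore
    /// // Update the attribute values first if desired, then release the pending response.
    /// controller.allow_read(conn_handle)?;
    /// ```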
/// /// See the Bluetooth Core v4.1 spec, Vol 3, Part F, section 3.4.4. AttReadMultiplePermitRequest(AttReadMultiplePermitRequest), /// This event is raised when the number of available TX buffers is above a threshold TH (TH = /// 2). The event will be given only if a previous ACI command returned with /// [InsufficientResources](AttError::InsufficientResources). On receiving this event, the /// application can continue to send notifications by calling `gatt_update_char_value`. #[cfg(feature = "ms")] GattTxPoolAvailable(GattTxPoolAvailable), /// This event is raised on the server when the client confirms the reception of an indication. #[cfg(feature = "ms")] GattServerConfirmation(ConnectionHandle), /// This event is given to the application when a prepare write request is received by the /// server from the client. This event will be given to the application only if the event bit /// for this event generation is set when the characteristic was added. When this event is /// received, the application has to check whether the value being requested for write is /// allowed to be written and respond with the command `gatt_write_response`. Based on the /// response from the application, the attribute value will be modified by the stack. If the /// write is rejected by the application, then the value of the attribute will not be modified /// and an error response will be sent to the client, with the error code as specified by the /// application. #[cfg(feature = "ms")] AttPrepareWritePermitRequest(AttPrepareWritePermitRequest), } /// Enumeration of vendor-specific status codes. #[derive(Copy, Clone, Debug, PartialEq)] #[repr(u8)] pub enum Status { /// The command cannot be executed due to the current state of the device. Failed = 0x41, /// Some parameters are invalid. InvalidParameters = 0x42, /// It is not allowed to start the procedure (e.g. another the procedure is ongoing or cannot be /// started on the given handle). NotAllowed = 0x46, /// Unexpected error. Error = 0x47, /// The address was not resolved. AddressNotResolved = 0x48, /// Failed to read from flash. FlashReadFailed = 0x49, /// Failed to write to flash. FlashWriteFailed = 0x4A, /// Failed to erase flash. FlashEraseFailed = 0x4B, /// Invalid CID InvalidCid = 0x50, /// Timer is not valid TimerNotValidLayer = 0x54, /// Insufficient resources to create the timer TimerInsufficientResources = 0x55, /// Connection signature resolving key (CSRK) is not found. CsrkNotFound = 0x5A, /// Identity resolving key (IRK) is not found IrkNotFound = 0x5B, /// The device is not in the security database. DeviceNotFoundInDatabase = 0x5C, /// The security database is full. SecurityDatabaseFull = 0x5D, /// The device is not bonded. DeviceNotBonded = 0x5E, /// The device is blacklisted. DeviceInBlacklist = 0x5F, /// The handle (service, characteristic, or descriptor) is invalid. InvalidHandle = 0x60, /// A parameter is invalid InvalidParameter = 0x61, /// The characteristic handle is not part of the service. OutOfHandle = 0x62, /// The operation is invalid InvalidOperation = 0x63, /// Insufficient resources to complete the operation. InsufficientResources = 0x64, /// The encryption key size is too small InsufficientEncryptionKeySize = 0x65, /// The characteristic already exists. CharacteristicAlreadyExists = 0x66, /// Returned when no valid slots are available (e.g. when there are no available state /// machines). NoValidSlot = 0x82, /// Returned when a scan window shorter than minimum allowed value has been requested /// (i.e. 2ms). 
    /// The Rust API should prevent this error from occurring.
    ScanWindowTooShort = 0x83,
    /// Returned when the maximum requested interval to be allocated is shorter than the current
    /// anchor period and there is no submultiple for the current anchor period that is between
    /// the minimum and the maximum requested intervals.
    NewIntervalFailed = 0x84,
    /// Returned when the maximum requested interval to be allocated is greater than the current
    /// anchor period and there is no multiple of the anchor period that is between the minimum and
    /// the maximum requested intervals.
    IntervalTooLarge = 0x85,
    /// Returned when the current anchor period or a new one can be found that is compatible to the
    /// interval range requested by the new slot but the maximum available length that can be
    /// allocated is less than the minimum requested slot length.
    LengthFailed = 0x86,
    /// MCU Library timed out.
    Timeout = 0xFF,
    /// MCU library: profile already initialized.
    ProfileAlreadyInitialized = 0xF0,
    /// MCU library: A parameter was null.
    NullParameter = 0xF1,
}

impl TryFrom<u8> for Status {
    type Error = hci::BadStatusError;

    fn try_from(value: u8) -> Result<Self, <Self as TryFrom<u8>>::Error> {
        match value {
            0x41 => Ok(Status::Failed),
            0x42 => Ok(Status::InvalidParameters),
            0x46 => Ok(Status::NotAllowed),
            0x47 => Ok(Status::Error),
            0x48 => Ok(Status::AddressNotResolved),
            0x49 => Ok(Status::FlashReadFailed),
            0x4A => Ok(Status::FlashWriteFailed),
            0x4B => Ok(Status::FlashEraseFailed),
            0x50 => Ok(Status::InvalidCid),
            0x54 => Ok(Status::TimerNotValidLayer),
            0x55 => Ok(Status::TimerInsufficientResources),
            0x5A => Ok(Status::CsrkNotFound),
            0x5B => Ok(Status::IrkNotFound),
            0x5C => Ok(Status::DeviceNotFoundInDatabase),
            0x5D => Ok(Status::SecurityDatabaseFull),
            0x5E => Ok(Status::DeviceNotBonded),
            0x5F => Ok(Status::DeviceInBlacklist),
            0x60 => Ok(Status::InvalidHandle),
            0x61 => Ok(Status::InvalidParameter),
            0x62 => Ok(Status::OutOfHandle),
            0x63 => Ok(Status::InvalidOperation),
            0x64 => Ok(Status::InsufficientResources),
            0x65 => Ok(Status::InsufficientEncryptionKeySize),
            0x66 => Ok(Status::CharacteristicAlreadyExists),
            0x82 => Ok(Status::NoValidSlot),
            0x83 => Ok(Status::ScanWindowTooShort),
            0x84 => Ok(Status::NewIntervalFailed),
            0x85 => Ok(Status::IntervalTooLarge),
            0x86 => Ok(Status::LengthFailed),
            0xFF => Ok(Status::Timeout),
            0xF0 => Ok(Status::ProfileAlreadyInitialized),
            0xF1 => Ok(Status::NullParameter),
            _ => Err(hci::BadStatusError::BadValue(value)),
        }
    }
}

impl Into<u8> for Status {
    fn into(self) -> u8 {
        self as u8
    }
}

/// Enumeration of potential errors when sending commands or deserializing events.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BlueNRGError {
    /// The event is not recognized. Includes the unknown opcode.
    UnknownEvent(u16),
    /// For the [HalInitialized](BlueNRGEvent::HalInitialized) event: the reset reason was not
    /// recognized. Includes the unrecognized byte.
    UnknownResetReason(u8),
    /// For the [EventsLost](BlueNRGEvent::EventsLost) event: The event included unrecognized event
    /// flags. Includes the entire bitfield.
    #[cfg(feature = "ms")]
    BadEventFlags(u64),
    /// For the [CrashReport](BlueNRGEvent::CrashReport) event: The crash reason was not
    /// recognized. Includes the unrecognized byte.
    #[cfg(feature = "ms")]
    UnknownCrashReason(u8),
    /// For the [GAP Pairing Complete](BlueNRGEvent::GapPairingComplete) event: The status was not
    /// recognized. Includes the unrecognized byte.
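    ///
    /// # Example
    ///
    /// A sketch of surfacing the raw byte when deserialization fails this way (`result` is
    /// assumed to come from `BlueNRGEvent::new`):
    ///
    /// ```ignore
    /// if let Err(hci::event::Error::Vendor(BlueNRGError::BadGapPairingStatus(code))) = result {
    ///     // `code` is the unrecognized status byte reported by the controller.
    /// }
    /// ```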
BadGapPairingStatus(u8), /// For the [GAP Device Found](BlueNRGEvent::GapDeviceFound) event: the type of event was not /// recognized. Includes the unrecognized byte. BadGapDeviceFoundEvent(u8), /// For the [GAP Device Found](BlueNRGEvent::GapDeviceFound) event: the type of BDADDR was not /// recognized. Includes the unrecognized byte. BadGapBdAddrType(u8), /// For the [GAP Procedure Complete](BlueNRGEvent::GapProcedureComplete) event: The procedure /// code was not recognized. Includes the unrecognized byte. BadGapProcedure(u8), /// For the [GAP Procedure Complete](BlueNRGEvent::GapProcedureComplete) event: The procedure /// status was not recognized. Includes the unrecognized byte. BadGapProcedureStatus(u8), /// For any L2CAP event: The event data length did not match the expected length. The first /// field is the required length, and the second is the actual length. BadL2CapDataLength(u8, u8), /// For any L2CAP event: The L2CAP length did not match the expected length. The first field is /// the required length, and the second is the actual length. BadL2CapLength(u16, u16), /// For any L2CAP response event: The L2CAP command was rejected, but the rejection reason was /// not recognized. Includes the unknown value. BadL2CapRejectionReason(u16), /// For the [L2CAP Connection Update Response](BlueNRGEvent::L2CapConnectionUpdateResponse) /// event: The code byte did not indicate either Rejected or Updated. Includes the invalid byte. BadL2CapConnectionResponseCode(u8), /// For the [L2CAP Connection Update Response](BlueNRGEvent::L2CapConnectionUpdateResponse) /// event: The command was accepted, but the result was not recognized. It did not indicate the /// parameters were either updated or rejected. Includes the unknown value. BadL2CapConnectionResponseResult(u16), /// For the [L2CAP Connection Update Request](BlueNRGEvent::L2CapConnectionUpdateRequest) event: /// The provided connection interval is invalid. Includes the underlying error. BadConnectionInterval(ConnectionIntervalError), /// For the [L2CAP Connection Update Request](BlueNRGEvent::L2CapConnectionUpdateRequest) event: /// The provided interval is invalid. Potential errors: /// - Either the minimum or maximum is out of range. The minimum value for either is 7.5 ms, and /// the maximum is 4 s. /// - The min is greater than the max /// /// See the Bluetooth specification, Vol 3, Part A, Section 4.20. Versions 4.1, 4.2 and 5.0. /// /// Inclues the provided minimum and maximum, respectively. BadL2CapConnectionUpdateRequestInterval(Duration, Duration), /// For the [L2CAP Connection Update Request](BlueNRGEvent::L2CapConnectionUpdateRequest) event: /// The provided connection latency is invalid. The maximum value for connection latency is /// defined in terms of the timeout and maximum connection interval. /// - `connIntervalMax = Interval Max` /// - `connSupervisionTimeout = Timeout` /// - `maxConnLatency = min(500, ((connSupervisionTimeout / (2 * connIntervalMax)) - 1))` /// /// See the Bluetooth specification, Vol 3, Part A, Section 4.20. Versions 4.1, 4.2 and 5.0. /// /// Inclues the provided value and maximum allowed value, respectively. BadL2CapConnectionUpdateRequestLatency(u16, u16), /// For the [L2CAP Connection Update Request](BlueNRGEvent::L2CapConnectionUpdateRequest) event: /// The provided timeout is invalid. The timeout field shall have a value in the range of 100 ms /// to 32 seconds (inclusive). /// /// See the Bluetooth specification, Vol 3, Part A, Section 4.20. Versions 4.1, 4.2 and 5.0. 
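    ///
    /// A sketch of the validity window described above, for a candidate `timeout: Duration`:
    ///
    /// ```ignore
    /// let valid = timeout >= Duration::from_millis(100) && timeout <= Duration::from_secs(32);
    /// ```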
/// /// Inclues the provided value. BadL2CapConnectionUpdateRequestTimeout(Duration), /// For the [ATT Find Information Response](BlueNRGEvent::AttFindInformationResponse) event: The /// format code is invalid. Includes the unrecognized byte. BadAttFindInformationResponseFormat(u8), /// For the [ATT Find Information Response](BlueNRGEvent::AttFindInformationResponse) event: The /// format code indicated 16-bit UUIDs, but the packet ends with a partial pair. AttFindInformationResponsePartialPair16, /// For the [ATT Find Information Response](BlueNRGEvent::AttFindInformationResponse) event: The /// format code indicated 128-bit UUIDs, but the packet ends with a partial pair. AttFindInformationResponsePartialPair128, /// For the [ATT Find by Type Value Response](BlueNRGEvent::AttFindByTypeValueResponse) event: /// The packet ends with a partial attribute pair. AttFindByTypeValuePartial, /// For the [ATT Read by Type Response](BlueNRGEvent::AttReadByTypeResponse) event: The packet /// ends with a partial attribute handle-value pair. AttReadByTypeResponsePartial, /// For the [ATT Read by Group Type Response](BlueNRGEvent::AttReadByGroupTypeResponse) event: /// The packet ends with a partial attribute data group. AttReadByGroupTypeResponsePartial, /// For the [GATT Procedure Complete](BlueNRGEvent::GattProcedureComplete) event: The status /// code was not recognized. Includes the unrecognized byte. BadGattProcedureStatus(u8), /// For the [ATT Error Response](BlueNRGEvent::AttErrorResponse) event: The request opcode was /// not recognized. Includes the unrecognized byte. BadAttRequestOpcode(u8), /// For the [ATT Error Response](BlueNRGEvent::AttErrorResponse) event: The error code was not /// recognized. Includes the unrecognized byte. BadAttError(u8), /// For the [ATT Read Multiple Permit Request](BlueNRGEvent::AttReadMultiplePermitRequest) /// event: The packet ends with a partial attribute handle. AttReadMultiplePermitRequestPartial, /// For the [HAL Read Config Data](crate::hal::Commands::read_config_data) command complete /// [event](command::ReturnParameters::HalReadConfigData): The returned value has a length that /// does not correspond to a requested parameter. Known lengths are 1, 2, 6, or 16. Includes the /// number of bytes returned. BadConfigParameterLength(usize), /// For the [HAL Get Link Status](crate::hal::Commands::get_link_status) command complete /// [event](command::ReturnParameters::HalGetLinkStatus): One of the bytes representing a link /// state does not represent a known link state. Returns the unknown value. UnknownLinkState(u8), /// For the [GAP Get Security Level](crate::gap::Commands::get_security_level) command complete /// [event](command::ReturnParameters::GapGetSecurityLevel): One of the boolean values /// ([`mitm_protection_required`](command::GapSecurityLevel::mitm_protection_required), /// [`bonding_required`](command::GapSecurityLevel::bonding_required), or /// [`out_of_band_data_present`](command::GapSecurityLevel::out_of_band_data_present)) was /// neither 0 nor 1. The unknown value is provided. BadBooleanValue(u8), /// For the [GAP Get Security Level](crate::gap::Commands::get_security_level) command complete /// [event](command::ReturnParameters::GapGetSecurityLevel): the pass key requirement field was /// an invalid value. The unknown byte is provided. 
BadPassKeyRequirement(u8), /// For the [GAP Get Bonded Devices](crate::gap::Commands::get_bonded_devices) command complete /// [event](command::ReturnParameters::GapGetBondedDevices): the packat was not long enough to /// contain the number of addresses it claimed to contain. PartialBondedDeviceAddress, /// For the [GAP Get Bonded Devices](crate::gap::Commands::get_bonded_devices) command complete /// [event](command::ReturnParameters::GapGetBondedDevices): one of the address type bytes was /// invalid. Includes the invalid byte. BadBdAddrType(u8), } macro_rules! require_len { ($left:expr, $right:expr) => { if $left.len() != $right { return Err(hci::event::Error::BadLength($left.len(), $right)); } }; } macro_rules! require_len_at_least { ($left:expr, $right:expr) => { if $left.len() < $right { return Err(hci::event::Error::BadLength($left.len(), $right)); } }; } fn first_16<T>(buffer: &[T]) -> &[T] { if buffer.len() < 16 { &buffer } else { &buffer[..16] } } impl hci::event::VendorEvent for BlueNRGEvent { type Error = BlueNRGError; type ReturnParameters = command::ReturnParameters; type Status = Status; fn new(buffer: &[u8]) -> Result<Self, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 2); let event_code = LittleEndian::read_u16(&buffer[0..=1]); match event_code { 0x0001 => Ok(BlueNRGEvent::HalInitialized(to_hal_initialized(buffer)?)), 0x0002 => { #[cfg(feature = "ms")] { Ok(BlueNRGEvent::EventsLost(to_lost_event(buffer)?)) } #[cfg(not(feature = "ms"))] { Err(hci::event::Error::Vendor(BlueNRGError::UnknownEvent( event_code, ))) } } 0x0003 => { #[cfg(feature = "ms")] { Ok(BlueNRGEvent::CrashReport(to_crash_report(buffer)?)) } #[cfg(not(feature = "ms"))] { Err(hci::event::Error::Vendor(BlueNRGError::UnknownEvent( event_code, ))) } } 0x0400 => Ok(BlueNRGEvent::GapLimitedDiscoverableTimeout), 0x0401 => Ok(BlueNRGEvent::GapPairingComplete(to_gap_pairing_complete( buffer, )?)), 0x0402 => Ok(BlueNRGEvent::GapPassKeyRequest(to_conn_handle(buffer)?)), 0x0403 => Ok(BlueNRGEvent::GapAuthorizationRequest(to_conn_handle( buffer, )?)), 0x0404 => Ok(BlueNRGEvent::GapPeripheralSecurityInitiated), 0x0405 => Ok(BlueNRGEvent::GapBondLost), 0x0406 => Ok(BlueNRGEvent::GapDeviceFound(to_gap_device_found(buffer)?)), 0x0407 => Ok(BlueNRGEvent::GapProcedureComplete( to_gap_procedure_complete(buffer)?, )), 0x0408 => { #[cfg(feature = "ms")] { Ok(BlueNRGEvent::GapAddressNotResolved(to_conn_handle(buffer)?)) } #[cfg(not(feature = "ms"))] { Ok(BlueNRGEvent::GapReconnectionAddress( to_gap_reconnection_address(buffer)?, )) } } 0x0800 => Ok(BlueNRGEvent::L2CapConnectionUpdateResponse( to_l2cap_connection_update_response(buffer)?, )), 0x0801 => Ok(BlueNRGEvent::L2CapProcedureTimeout( to_l2cap_procedure_timeout(buffer)?, )), 0x0802 => Ok(BlueNRGEvent::L2CapConnectionUpdateRequest( to_l2cap_connection_update_request(buffer)?, )), 0x0C01 => Ok(BlueNRGEvent::GattAttributeModified( to_gatt_attribute_modified(buffer)?, )), 0x0C02 => Ok(BlueNRGEvent::GattProcedureTimeout(to_conn_handle(buffer)?)), 0x0C03 => Ok(BlueNRGEvent::AttExchangeMtuResponse( to_att_exchange_mtu_resp(buffer)?, )), 0x0C04 => Ok(BlueNRGEvent::AttFindInformationResponse( to_att_find_information_response(buffer)?, )), 0x0C05 => Ok(BlueNRGEvent::AttFindByTypeValueResponse( to_att_find_by_value_type_response(buffer)?, )), 0x0C06 => Ok(BlueNRGEvent::AttReadByTypeResponse( to_att_read_by_type_response(buffer)?, )), 0x0C07 => Ok(BlueNRGEvent::AttReadResponse(to_att_read_response(buffer)?)), 0x0C08 => 
Ok(BlueNRGEvent::AttReadBlobResponse(to_att_read_response( buffer, )?)), 0x0C09 => Ok(BlueNRGEvent::AttReadMultipleResponse(to_att_read_response( buffer, )?)), 0x0C0A => Ok(BlueNRGEvent::AttReadByGroupTypeResponse( to_att_read_by_group_type_response(buffer)?, )), 0x0C0C => Ok(BlueNRGEvent::AttPrepareWriteResponse( to_att_prepare_write_response(buffer)?, )), 0x0C0D => Ok(BlueNRGEvent::AttExecuteWriteResponse(to_conn_handle( buffer, )?)), 0x0C0E => Ok(BlueNRGEvent::GattIndication(to_attribute_value(buffer)?)), 0x0C0F => Ok(BlueNRGEvent::GattNotification(to_attribute_value(buffer)?)), 0x0C10 => Ok(BlueNRGEvent::GattProcedureComplete( to_gatt_procedure_complete(buffer)?, )), 0x0C11 => Ok(BlueNRGEvent::AttErrorResponse(to_att_error_response( buffer, )?)), 0x0C12 => Ok( BlueNRGEvent::GattDiscoverOrReadCharacteristicByUuidResponse(to_attribute_value( buffer, )?), ), 0x0C13 => Ok(BlueNRGEvent::AttWritePermitRequest( to_write_permit_request(buffer)?, )), 0x0C14 => Ok(BlueNRGEvent::AttReadPermitRequest( to_att_read_permit_request(buffer)?, )), 0x0C15 => Ok(BlueNRGEvent::AttReadMultiplePermitRequest( to_att_read_multiple_permit_request(buffer)?, )), 0x0C16 => { #[cfg(feature = "ms")] { Ok(BlueNRGEvent::GattTxPoolAvailable( to_gatt_tx_pool_available(buffer)?, )) } #[cfg(not(feature = "ms"))] { Err(hci::event::Error::Vendor(BlueNRGError::UnknownEvent( event_code, ))) } } 0x0C17 => { #[cfg(feature = "ms")] { Ok(BlueNRGEvent::GattServerConfirmation(to_conn_handle( buffer, )?)) } #[cfg(not(feature = "ms"))] { Err(hci::event::Error::Vendor(BlueNRGError::UnknownEvent( event_code, ))) } } 0x0C18 => { #[cfg(feature = "ms")] { Ok(BlueNRGEvent::AttPrepareWritePermitRequest( to_att_prepare_write_permit_request(buffer)?, )) } #[cfg(not(feature = "ms"))] { Err(hci::event::Error::Vendor(BlueNRGError::UnknownEvent( event_code, ))) } } _ => Err(hci::event::Error::Vendor(BlueNRGError::UnknownEvent( event_code, ))), } } } /// Potential reasons the controller sent the [`HalInitialized`](BlueNRGEvent::HalInitialized) /// event. #[derive(Clone, Copy, Debug, PartialEq)] pub enum ResetReason { /// Firmware started properly Normal, /// Updater mode entered because of updater_start command Updater, /// Updater mode entered because of a bad BLUE flag UpdaterBadFlag, /// Updater mode entered with IRQ pin UpdaterPin, /// Reset caused by watchdog Watchdog, /// Reset due to lockup Lockup, /// Brownout reset Brownout, /// Reset caused by a crash (NMI or Hard Fault) Crash, /// Reset caused by an ECC error EccError, } impl TryFrom<u8> for ResetReason { type Error = BlueNRGError; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 1 => Ok(ResetReason::Normal), 2 => Ok(ResetReason::Updater), 3 => Ok(ResetReason::UpdaterBadFlag), 4 => Ok(ResetReason::UpdaterPin), 5 => Ok(ResetReason::Watchdog), 6 => Ok(ResetReason::Lockup), 7 => Ok(ResetReason::Brownout), 8 => Ok(ResetReason::Crash), 9 => Ok(ResetReason::EccError), _ => Err(BlueNRGError::UnknownResetReason(value)), } } } /// Convert a buffer to the `HalInitialized` `BlueNRGEvent`. /// /// # Errors /// /// - Returns a `BadLength` HCI error if the buffer is not exactly 3 bytes long /// /// - Returns a `UnknownResetReason` BlueNRG error if the reset reason is not recognized. fn to_hal_initialized(buffer: &[u8]) -> Result<ResetReason, hci::event::Error<BlueNRGError>> { require_len!(buffer, 3); Ok(buffer[2].try_into().map_err(hci::event::Error::Vendor)?) } #[cfg(feature = "ms")] bitflags! { /// Bitfield for the [Events Lost](BlueNRGEvent::EventsLost) event. 
Each bit indicates a /// different type of event that was not handled. #[derive(Default)] pub struct EventFlags: u64 { /// HCI Event: [Disconnection complete](hci::event::Event::DisconnectionComplete). const DISCONNECTION_COMPLETE = 1 << 0; /// HCI Event: [Encryption change](hci::event::Event::EncryptionChange). const ENCRYPTION_CHANGE = 1 << 1; /// HCI Event: [Read Remote Version /// Complete](hci::event::Event::ReadRemoteVersionInformationComplete). const READ_REMOTE_VERSION_COMPLETE = 1 << 2; /// HCI Event: [Command Complete](hci::event::Event::CommandComplete). const COMMAND_COMPLETE = 1 << 3; /// HCI Event: [Command Status](hci::event::Event::CommandStatus). const COMMAND_STATUS = 1 << 4; /// HCI Event: [Hardware Error](hci::event::Event::HardwareError). const HARDWARE_ERROR = 1 << 5; /// HCI Event: [Number of completed packets](hci::event::Event::NumberOfCompletedPackets). const NUMBER_OF_COMPLETED_PACKETS = 1 << 6; /// HCI Event: [Encryption key refresh /// complete](hci::event::Event::EncryptionKeyRefreshComplete). const ENCRYPTION_KEY_REFRESH = 1 << 7; /// BlueNRG-MS Event: [HAL Initialized](BlueNRGEvent::HalInitialized). const HAL_INITIALIZED = 1 << 8; /// BlueNRG Event: [GAP Set Limited Discoverable /// complete](BlueNRGEvent::GapLimitedDiscoverableTimeout). const GAP_LIMITED_DISCOVERABLE_TIMEOUT = 1 << 9; /// BlueNRG Event: [GAP Pairing complete](BlueNRGEvent::GapPairingComplete). const GAP_PAIRING_COMPLETE = 1 << 10; /// BlueNRG Event: [GAP Pass Key Request](BlueNRGEvent::GapPassKeyRequest). const GAP_PASS_KEY_REQUEST = 1 << 11; /// BlueNRG Event: [GAP Authorization Request](BlueNRGEvent::GapAuthorizationRequest). const GAP_AUTHORIZATION_REQUEST = 1 << 12; /// BlueNRG Event: [GAP Peripheral Security /// Initiated](BlueNRGEvent::GapPeripheralSecurityInitiated). const GAP_PERIPHERAL_SECURITY_INITIATED = 1 << 13; /// BlueNRG Event: [GAP Bond Lost](BlueNRGEvent::GapBondLost). const GAP_BOND_LOST = 1 << 14; /// BlueNRG Event: [GAP Procedure complete](BlueNRGEvent::GapProcedureComplete). const GAP_PROCEDURE_COMPLETE = 1 << 15; /// BlueNRG-MS Event: [GAP Address Not Resolved](BlueNRGEvent::GapAddressNotResolved). const GAP_ADDRESS_NOT_RESOLVED = 1 << 16; /// BlueNRG Event: [L2Cap Connection Update /// Response](BlueNRGEvent::L2CapConnectionUpdateResponse). const L2CAP_CONNECTION_UPDATE_RESPONSE = 1 << 17; /// BlueNRG Event: [L2Cap Procedure Timeout](BlueNRGEvent::L2CapProcedureTimeout). const L2CAP_PROCEDURE_TIMEOUT = 1 << 18; /// BlueNRG Event: [L2Cap Connection Update /// Request](BlueNRGEvent::L2CapConnectionUpdateRequest). const L2CAP_CONNECTION_UPDATE_REQUEST = 1 << 19; /// BlueNRG Event: [GATT Attribute modified](BlueNRGEvent::GattAttributeModified). const GATT_ATTRIBUTE_MODIFIED = 1 << 20; /// BlueNRG Event: [GATT timeout](BlueNRGEvent::GattProcedureTimeout). const GATT_PROCEDURE_TIMEOUT = 1 << 21; /// BlueNRG Event: [Exchange MTU Response](BlueNRGEvent::AttExchangeMtuResponse). const ATT_EXCHANGE_MTU_RESPONSE = 1 << 22; /// BlueNRG Event: [Find information response](BlueNRGEvent::AttFindInformationResponse). const ATT_FIND_INFORMATION_RESPONSE = 1 << 23; /// BlueNRG Event: [Find by type value response](BlueNRGEvent::AttFindByTypeValueResponse). const ATT_FIND_BY_TYPE_VALUE_RESPONSE = 1 << 24; /// BlueNRG Event: [Find read by type response](BlueNRGEvent::AttReadByTypeResponse). const ATT_READ_BY_TYPE_RESPONSE = 1 << 25; /// BlueNRG Event: [Read response](BlueNRGEvent::AttReadResponse). 
const ATT_READ_RESPONSE = 1 << 26; /// BlueNRG Event: [Read blob response](BlueNRGEvent::AttReadBlobResponse). const ATT_READ_BLOB_RESPONSE = 1 << 27; /// BlueNRG Event: [Read multiple response](BlueNRGEvent::AttReadMultipleResponse). const ATT_READ_MULTIPLE_RESPONSE = 1 << 28; /// BlueNRG Event: [Read by group type response](BlueNRGEvent::AttReadByGroupTypeResponse). const ATT_READ_BY_GROUP_TYPE_RESPONSE = 1 << 29; /// BlueNRG Event: ATT Write Response const ATT_WRITE_RESPONSE = 1 << 30; /// BlueNRG Event: [Prepare Write Response](BlueNRGEvent::AttPrepareWriteResponse). const ATT_PREPARE_WRITE_RESPONSE = 1 << 31; /// BlueNRG Event: [Execute write response](BlueNRGEvent::AttExecuteWriteResponse). const ATT_EXECUTE_WRITE_RESPONSE = 1 << 32; /// BlueNRG Event: [Indication received](BlueNRGEvent::GattIndication) from server. const GATT_INDICATION = 1 << 33; /// BlueNRG Event: [Notification received](BlueNRGEvent::GattNotification) from server. const GATT_NOTIFICATION = 1 << 34; /// BlueNRG Event: [GATT Procedure complete](BlueNRGEvent::GattProcedureComplete). const GATT_PROCEDURE_COMPLETE = 1 << 35; /// BlueNRG Event: [Error response received from server](BlueNRGEvent::AttErrorResponse). const GATT_ERROR_RESPONSE = 1 << 36; /// BlueNRG Event: [Response](BlueNRGEvent::GattDiscoverOrReadCharacteristicByUuidResponse) /// to either "Discover Characteristic by UUID" or "Read Characteristic by UUID" request const GATT_DISCOVER_OR_READ_CHARACTERISTIC_BY_UUID_RESPONSE = 1 << 37; /// BlueNRG Event: [Write request received](BlueNRGEvent::AttWritePermitRequest) by server. const GATT_WRITE_PERMIT_REQUEST = 1 << 38; /// BlueNRG Event: [Read request received](BlueNRGEvent::AttReadPermitRequest) by server. const GATT_READ_PERMIT_REQUEST = 1 << 39; /// BlueNRG Event: [Read multiple request /// received](BlueNRGEvent::AttReadMultiplePermitRequest) by server. const GATT_READ_MULTIPLE_PERMIT_REQUEST = 1 << 40; /// BlueNRG-MS Event: [TX Pool available](BlueNRGEvent::GattTxPoolAvailable) event missed. const GATT_TX_POOL_AVAILABLE = 1 << 41; /// BlueNRG-MS Event: [Server confirmation](BlueNRGEvent::GattServerConfirmation). const GATT_SERVER_RX_CONFIRMATION = 1 << 42; /// BlueNRG-MS Event: [Prepare write permit /// request](BlueNRGEvent::AttPrepareWritePermitRequest). const GATT_PREPARE_WRITE_PERMIT_REQUEST = 1 << 43; /// BlueNRG-MS Event: Link Layer [connection /// complete](hci::event::Event::LeConnectionComplete). const LINK_LAYER_CONNECTION_COMPLETE = 1 << 44; /// BlueNRG-MS Event: Link Layer [advertising /// report](hci::event::Event::LeAdvertisingReport). const LINK_LAYER_ADVERTISING_REPORT = 1 << 45; /// BlueNRG-MS Event: Link Layer [connection update /// complete](hci::event::Event::LeConnectionUpdateComplete). const LINK_LAYER_CONNECTION_UPDATE_COMPLETE = 1 << 46; /// BlueNRG-MS Event: Link Layer [read remote used /// features](hci::event::Event::LeReadRemoteUsedFeaturesComplete). const LINK_LAYER_READ_REMOTE_USED_FEATURES = 1 << 47; /// BlueNRG-MS Event: Link Layer [long-term key /// request](hci::event::Event::LeLongTermKeyRequest). const LINK_LAYER_LTK_REQUEST = 1 << 48; } } /// Convert a buffer to the `EventsLost` `BlueNRGEvent`. /// /// # Errors /// /// - Returns a `BadLength` HCI error if the buffer is not exactly 10 bytes long /// - Returns [`BadEventFlags`](BlueNRGError::BadEventFlags) if a bit is set that does not represent /// a lost event. 
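///
/// # Example
///
/// A decoding sketch: the buffer is the 2-byte little-endian event code (0x0002) followed by a
/// little-endian `u64` bitfield.
///
/// ```ignore
/// let buffer = [0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
/// let flags = to_lost_event(&buffer).unwrap();
/// assert!(flags.contains(EventFlags::DISCONNECTION_COMPLETE));
/// ```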
#[cfg(feature = "ms")] fn to_lost_event(buffer: &[u8]) -> Result<EventFlags, hci::event::Error<BlueNRGError>> { require_len!(buffer, 10); let bits = LittleEndian::read_u64(&buffer[2..]); EventFlags::from_bits(bits) .ok_or_else(|| hci::event::Error::Vendor(BlueNRGError::BadEventFlags(bits))) } // The maximum length of [`FaultData::debug_data`]. The maximum length of an event is 255 bytes, // and the non-variable data of the event takes up 40 bytes. #[cfg(feature = "ms")] const MAX_DEBUG_DATA_LEN: usize = 215; /// Specific reason for the fault reported with [`FaultData`]. #[cfg(feature = "ms")] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CrashReason { /// The controller reset because an assertion failed. Assertion, /// The controller reset because of an NMI fault. NmiFault, /// The controller reset because of a hard fault. HardFault, } #[cfg(feature = "ms")] impl TryFrom<u8> for CrashReason { type Error = BlueNRGError; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 0 => Ok(CrashReason::Assertion), // The documentation is conflicting for the numeric value of NMI Fault. The // CubeExpansion source code says 1, but the user manual says 6. 1 | 6 => Ok(CrashReason::NmiFault), // The documentation is conflicting for the numeric value of hard Fault. The // CubeExpansion source code says 2, but the user manual says 7. 2 | 7 => Ok(CrashReason::HardFault), _ => Err(BlueNRGError::UnknownCrashReason(value)), } } } /// Fault data reported after a crash. #[cfg(feature = "ms")] #[derive(Clone, Copy)] pub struct FaultData { /// Fault reason. pub reason: CrashReason, /// MCP SP register pub sp: u32, /// MCU R0 register pub r0: u32, /// MCU R1 register pub r1: u32, /// MCU R2 register pub r2: u32, /// MCU R3 register pub r3: u32, /// MCU R12 register pub r12: u32, /// MCU LR register pub lr: u32, /// MCU PC register pub pc: u32, /// MCU xPSR register pub xpsr: u32, // Number of valid bytes in debug_data debug_data_len: usize, // Additional crash dump data debug_data_buf: [u8; MAX_DEBUG_DATA_LEN], } #[cfg(feature = "ms")] impl Debug for FaultData { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!( f, "FaultData {{ reason: {:?}, sp: {:x}, r0: {:x}, r1: {:x}, r2: {:x}, r3: {:x}, ", self.reason, self.sp, self.r0, self.r1, self.r2, self.r3 )?; write!( f, "r12: {:x}, lr: {:x}, pc: {:x}, xpsr: {:x}, debug_data: [", self.r12, self.lr, self.pc, self.xpsr )?; for byte in self.debug_data() { write!(f, " {:x}", byte)?; } write!(f, " ] }}") } } #[cfg(feature = "ms")] impl FaultData { /// Returns the valid debug data. pub fn debug_data(&self) -> &[u8] { &self.debug_data_buf[..self.debug_data_len] } } #[cfg(feature = "ms")] fn to_crash_report(buffer: &[u8]) -> Result<FaultData, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 40); let debug_data_len = buffer[39] as usize; require_len!(buffer, 40 + debug_data_len); let mut fault_data = FaultData { reason: buffer[2].try_into().map_err(hci::event::Error::Vendor)?, sp: LittleEndian::read_u32(&buffer[3..]), r0: LittleEndian::read_u32(&buffer[7..]), r1: LittleEndian::read_u32(&buffer[11..]), r2: LittleEndian::read_u32(&buffer[15..]), r3: LittleEndian::read_u32(&buffer[19..]), r12: LittleEndian::read_u32(&buffer[23..]), lr: LittleEndian::read_u32(&buffer[27..]), pc: LittleEndian::read_u32(&buffer[31..]), xpsr: LittleEndian::read_u32(&buffer[35..]), debug_data_len, debug_data_buf: [0; MAX_DEBUG_DATA_LEN], }; fault_data.debug_data_buf[..debug_data_len].copy_from_slice(&buffer[40..]); Ok(fault_data) } macro_rules! 
require_l2cap_event_data_len { ($left:expr, $right:expr) => { let actual = $left[4]; if actual != $right { return Err(hci::event::Error::Vendor(BlueNRGError::BadL2CapDataLength( actual, $right, ))); } }; } macro_rules! require_l2cap_len { ($actual:expr, $expected:expr) => { if $actual != $expected { return Err(hci::event::Error::Vendor(BlueNRGError::BadL2CapLength( $actual, $expected, ))); } }; } /// This event is generated when the central device responds to the L2CAP connection update request /// packet. /// /// For more info see connection parameter update response and command reject in Bluetooth Core v4.0 /// spec. #[derive(Copy, Clone, Debug)] pub struct L2CapConnectionUpdateResponse { /// The connection handle related to the event pub conn_handle: ConnectionHandle, /// The result of the update request, including details about the result. pub result: L2CapConnectionUpdateResult, } /// Reasons why an L2CAP command was rejected. see the Bluetooth specification, v4.1, Vol 3, Part A, /// Section 4.1. #[derive(Copy, Clone, Debug, PartialEq)] pub enum L2CapRejectionReason { /// The controller sent an unknown command. CommandNotUnderstood, /// When multiple commands are included in an L2CAP packet and the packet exceeds the signaling /// MTU (MTUsig) of the receiver, a single Command Reject packet shall be sent in response. SignalingMtuExceeded, /// Invalid CID in request InvalidCid, } impl TryFrom<u16> for L2CapRejectionReason { type Error = BlueNRGError; fn try_from(value: u16) -> Result<Self, Self::Error> { match value { 0 => Ok(L2CapRejectionReason::CommandNotUnderstood), 1 => Ok(L2CapRejectionReason::SignalingMtuExceeded), 2 => Ok(L2CapRejectionReason::InvalidCid), _ => Err(BlueNRGError::BadL2CapRejectionReason(value)), } } } /// Potential results that can be used in the L2CAP connection update response. #[derive(Copy, Clone, Debug, PartialEq)] pub enum L2CapConnectionUpdateResult { /// The update request was rejected. The code indicates the reason for the rejection. CommandRejected(L2CapRejectionReason), /// The L2CAP connection update response is valid. The code indicates if the parameters were /// rejected. ParametersRejected, /// The L2CAP connection update response is valid. The code indicates if the parameters were /// updated. 
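    ///
    /// # Example
    ///
    /// A dispatch sketch over the three possible outcomes:
    ///
    /// ```ignore
    /// match response.result {
    ///     L2CapConnectionUpdateResult::ParametersUpdated => { /* new parameters are in effect */ }
    ///     L2CapConnectionUpdateResult::ParametersRejected => { /* keep the old parameters */ }
    ///     L2CapConnectionUpdateResult::CommandRejected(reason) => {
    ///         // The request itself was rejected; `reason` explains why.
    ///     }
    /// }
    /// ```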
ParametersUpdated, } fn to_l2cap_connection_update_accepted_result( value: u16, ) -> Result<L2CapConnectionUpdateResult, BlueNRGError> { match value { 0x0000 => Ok(L2CapConnectionUpdateResult::ParametersUpdated), 0x0001 => Ok(L2CapConnectionUpdateResult::ParametersRejected), _ => Err(BlueNRGError::BadL2CapConnectionResponseResult(value)), } } fn extract_l2cap_connection_update_response_result( buffer: &[u8], ) -> Result<L2CapConnectionUpdateResult, BlueNRGError> { match buffer[5] { 0x01 => Ok(L2CapConnectionUpdateResult::CommandRejected( LittleEndian::read_u16(&buffer[9..]).try_into()?, )), 0x13 => to_l2cap_connection_update_accepted_result(LittleEndian::read_u16(&buffer[9..])), _ => Err(BlueNRGError::BadL2CapConnectionResponseCode(buffer[5])), } } fn to_l2cap_connection_update_response( buffer: &[u8], ) -> Result<L2CapConnectionUpdateResponse, hci::event::Error<BlueNRGError>> { require_len!(buffer, 11); require_l2cap_event_data_len!(buffer, 6); require_l2cap_len!(LittleEndian::read_u16(&buffer[7..]), 2); Ok(L2CapConnectionUpdateResponse { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), result: extract_l2cap_connection_update_response_result(buffer) .map_err(hci::event::Error::Vendor)?, }) } /// This event is generated when the central device does not respond to the connection update /// request within 30 seconds. #[derive(Copy, Clone, Debug)] pub struct L2CapProcedureTimeout { /// The connection handle related to the event. pub conn_handle: ConnectionHandle, } fn to_l2cap_procedure_timeout( buffer: &[u8], ) -> Result<ConnectionHandle, hci::event::Error<BlueNRGError>> { require_len!(buffer, 5); require_l2cap_event_data_len!(buffer, 0); Ok(ConnectionHandle(LittleEndian::read_u16(&buffer[2..]))) } /// The event is given by the L2CAP layer when a connection update request is received from the /// peripheral. /// /// The application has to respond by calling /// [`l2cap_connection_parameter_update_response`](crate::l2cap::Commands::connection_parameter_update_response). /// /// Defined in Vol 3, Part A, section 4.20 of the Bluetooth specification. #[derive(Copy, Clone, Debug)] pub struct L2CapConnectionUpdateRequest { /// Handle of the connection for which the connection update request has been received. The /// [same handle](crate::l2cap::ConnectionParameterUpdateResponse::conn_handle) has to be /// returned while responding to the event with the command /// [`l2cap_connection_parameter_update_response`](crate::l2cap::Commands::connection_parameter_update_response). pub conn_handle: ConnectionHandle, /// This is the identifier which associates the request to the response. The [same /// identifier](crate::l2cap::ConnectionParameterUpdateResponse::identifier) has to be returned /// by the upper layer in the command /// [`l2cap_connection_parameter_update_response`](crate::l2cap::Commands::connection_parameter_update_response). pub identifier: u8, /// Defines the range of the connection interval, the latency, and the supervision timeout. 
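    ///
    /// A response sketch, assuming a `controller` exposing the
    /// `connection_parameter_update_response` command linked above; only the two fields echoed
    /// from this event are shown, and the remaining response parameters are elided:
    ///
    /// ```ignore
    /// controller.connection_parameter_update_response(&ConnectionParameterUpdateResponse {
    ///     conn_handle: request.conn_handle,
    ///     identifier: request.identifier,
    ///     // ...
    /// })?;
    /// ```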
pub conn_interval: ConnectionInterval, } fn to_l2cap_connection_update_request( buffer: &[u8], ) -> Result<L2CapConnectionUpdateRequest, hci::event::Error<BlueNRGError>> { require_len!(buffer, 16); require_l2cap_event_data_len!(buffer, 11); require_l2cap_len!(LittleEndian::read_u16(&buffer[6..]), 8); let interval = ConnectionInterval::from_bytes(&buffer[8..16]) .map_err(BlueNRGError::BadConnectionInterval) .map_err(hci::event::Error::Vendor)?; Ok(L2CapConnectionUpdateRequest { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), identifier: buffer[5], conn_interval: interval, }) } /// This event is generated when the pairing process has completed successfully or a pairing /// procedure timeout has occurred or the pairing has failed. This is to notify the application that /// we have paired with a remote device so that it can take further actions or to notify that a /// timeout has occurred so that the upper layer can decide to disconnect the link. #[derive(Copy, Clone, Debug)] pub struct GapPairingComplete { /// Connection handle on which the pairing procedure completed pub conn_handle: ConnectionHandle, /// Reason the pairing is complete. pub status: GapPairingStatus, } /// Reasons the [GAP Pairing Complete](BlueNRGEvent::GapPairingComplete) event was generated. #[derive(Copy, Clone, Debug, PartialEq)] pub enum GapPairingStatus { /// Pairing with a remote device was successful. Success, /// The SMP timeout has elapsed and no further SMP commands will be processed until /// reconnection. Timeout, /// The pairing failed with the remote device. Failed, } impl TryFrom<u8> for GapPairingStatus { type Error = BlueNRGError; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 0 => Ok(GapPairingStatus::Success), 1 => Ok(GapPairingStatus::Timeout), 2 => Ok(GapPairingStatus::Failed), _ => Err(BlueNRGError::BadGapPairingStatus(value)), } } } fn to_gap_pairing_complete( buffer: &[u8], ) -> Result<GapPairingComplete, hci::event::Error<BlueNRGError>> { require_len!(buffer, 5); Ok(GapPairingComplete { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), status: buffer[4].try_into().map_err(hci::event::Error::Vendor)?, }) } fn to_conn_handle(buffer: &[u8]) -> Result<ConnectionHandle, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 4); Ok(ConnectionHandle(LittleEndian::read_u16(&buffer[2..]))) } /// The event is given by the GAP layer to the upper layers when a device is discovered during /// scanning as a consequence of one of the GAP procedures started by the upper layers. #[derive(Copy, Clone, Debug)] pub struct GapDeviceFound { /// Type of event pub event: GapDeviceFoundEvent, /// Address of the peer device found during scanning pub bdaddr: BdAddrType, // Length of significant data data_len: usize, // Advertising or scan response data. data_buf: [u8; 31], /// Received signal strength indicator (range: -127 - 20). pub rssi: Option<i8>, } impl GapDeviceFound { /// Returns the valid scan response data. 
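    ///
    /// # Example
    ///
    /// A usage sketch; `handle_advertising_data` is a placeholder for application code:
    ///
    /// ```ignore
    /// if let BlueNRGEvent::GapDeviceFound(report) = event {
    ///     // Only the first `data_len` bytes of the fixed-size internal buffer are meaningful.
    ///     handle_advertising_data(report.data());
    /// }
    /// ```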
pub fn data(&self) -> &[u8] { &self.data_buf[..self.data_len] } } pub use hci::event::AdvertisementEvent as GapDeviceFoundEvent; fn to_gap_device_found(buffer: &[u8]) -> Result<GapDeviceFound, hci::event::Error<BlueNRGError>> { const RSSI_UNAVAILABLE: i8 = 127; require_len_at_least!(buffer, 12); let data_len = buffer[10] as usize; require_len!(buffer, 12 + data_len); // Reinterpret the raw RSSI byte as a signed value. let rssi = buffer[buffer.len() - 1] as i8; let mut addr = BdAddr([0; 6]); addr.0.copy_from_slice(&buffer[4..10]); let mut event = GapDeviceFound { event: buffer[2].try_into().map_err(|e| { if let hci::event::Error::BadLeAdvertisementType(code) = e { hci::event::Error::Vendor(BlueNRGError::BadGapDeviceFoundEvent(code)) } else { unreachable!() } })?, bdaddr: hci::to_bd_addr_type(buffer[3], addr) .map_err(|e| hci::event::Error::Vendor(BlueNRGError::BadGapBdAddrType(e.0)))?, data_len, data_buf: [0; 31], rssi: if rssi == RSSI_UNAVAILABLE { None } else { Some(rssi) }, }; event.data_buf[..event.data_len].copy_from_slice(&buffer[11..buffer.len() - 1]); Ok(event) } /// This event is sent by the GAP to the upper layers when a procedure previously started has been /// terminated by the upper layer or has completed for any other reason. #[derive(Copy, Clone, Debug)] pub struct GapProcedureComplete { /// Type of procedure that completed pub procedure: GapProcedure, /// Status of the procedure pub status: GapProcedureStatus, } /// Maximum length of the name returned in the [`NameDiscovery`](GapProcedure::NameDiscovery) /// procedure. pub const MAX_NAME_LEN: usize = 248; /// Newtype for the name buffer returned after successful /// [`NameDiscovery`](GapProcedure::NameDiscovery). #[derive(Copy, Clone)] pub struct NameBuffer(pub [u8; MAX_NAME_LEN]); impl Debug for NameBuffer { fn fmt(&self, f: &mut Formatter) -> FmtResult { first_16(&self.0).fmt(f) } } impl PartialEq<NameBuffer> for NameBuffer { fn eq(&self, other: &Self) -> bool { if self.0.len() != other.0.len() { return false; } for (a, b) in self.0.iter().zip(other.0.iter()) { if a != b { return false; } } true } } /// Procedures whose completion may be reported by /// [`GapProcedureComplete`](BlueNRGEvent::GapProcedureComplete). #[allow(clippy::large_enum_variant)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum GapProcedure { /// See Vol 3, Part C, section 9.2.5. LimitedDiscovery, /// See Vol 3, Part C, section 9.2.6. GeneralDiscovery, /// See Vol 3, Part C, section 9.2.7. Contains the number of valid bytes and buffer with enough /// space for the maximum length of the name that can be returned. NameDiscovery(usize, NameBuffer), /// See Vol 3, Part C, section 9.3.5. AutoConnectionEstablishment, /// See Vol 3, Part C, section 9.3.6. Contains the reconnection address. GeneralConnectionEstablishment(BdAddr), /// See Vol 3, Part C, section 9.3.7. SelectiveConnectionEstablishment, /// See Vol 3, Part C, section 9.3.8. DirectConnectionEstablishment, } /// Possible results of a [GAP procedure](BlueNRGEvent::GapProcedureComplete). #[derive(Copy, Clone, Debug, PartialEq)] pub enum GapProcedureStatus { /// BLE Status Success. Success, /// BLE Status Failed. Failed, /// Procedure failed due to authentication requirements.
AuthFailure, } impl TryFrom<u8> for GapProcedureStatus { type Error = BlueNRGError; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 0x00 => Ok(GapProcedureStatus::Success), 0x41 => Ok(GapProcedureStatus::Failed), 0x05 => Ok(GapProcedureStatus::AuthFailure), _ => Err(BlueNRGError::BadGapProcedureStatus(value)), } } } fn to_gap_procedure_complete( buffer: &[u8], ) -> Result<GapProcedureComplete, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 4); let procedure = match buffer[2] { 0x01 => GapProcedure::LimitedDiscovery, 0x02 => GapProcedure::GeneralDiscovery, 0x04 => { require_len_at_least!(buffer, 5); let name_len = buffer.len() - 4; let mut name = NameBuffer([0; MAX_NAME_LEN]); name.0[..name_len].copy_from_slice(&buffer[4..]); GapProcedure::NameDiscovery(name_len, name) } 0x08 => GapProcedure::AutoConnectionEstablishment, 0x10 => { require_len!(buffer, 10); let mut addr = BdAddr([0; 6]); addr.0.copy_from_slice(&buffer[4..10]); GapProcedure::GeneralConnectionEstablishment(addr) } 0x20 => GapProcedure::SelectiveConnectionEstablishment, 0x40 => GapProcedure::DirectConnectionEstablishment, _ => { return Err(hci::event::Error::Vendor(BlueNRGError::BadGapProcedure( buffer[2], ))); } }; Ok(GapProcedureComplete { procedure, status: buffer[3].try_into().map_err(hci::event::Error::Vendor)?, }) } #[cfg(not(feature = "ms"))] fn to_gap_reconnection_address(buffer: &[u8]) -> Result<BdAddr, hci::event::Error<BlueNRGError>> { require_len!(buffer, 8); let mut addr = BdAddr([0; 6]); addr.0.copy_from_slice(&buffer[2..]); Ok(addr) } /// This event is generated to the application by the ATT server when a client modifies any /// attribute on the server, as a consequence of one of the following ATT procedures: /// - write without response /// - signed write without response /// - write characteristic value /// - write long characteristic value /// - reliable write #[derive(Copy, Clone)] pub struct GattAttributeModified { /// The connection handle which modified the attribute pub conn_handle: ConnectionHandle, /// Handle of the attribute that was modified pub attr_handle: AttributeHandle, /// Offset of the reported value inside the attribute. #[cfg(feature = "ms")] pub offset: usize, /// If the entire value of the attribute does not fit inside a single GattAttributeModified /// event, this is true to notify that other GattAttributeModified events will follow to report /// the remaining value. #[cfg(feature = "ms")] pub continued: bool, /// Number of valid bytes in `data`. data_len: usize, /// The new attribute value, starting from the given offset. If compiling without "ms" support, /// the offset is always 0. data_buf: [u8; MAX_ATTRIBUTE_LEN], } impl GattAttributeModified { /// Returns the valid attribute data returned by the ATT attribute modified event as a slice of /// bytes. pub fn data(&self) -> &[u8] { &self.data_buf[..self.data_len] } } /// Newtype for an attribute handle. These handles are IDs, not general integers, and should not be /// manipulated as such. #[derive(Copy, Clone, Debug, PartialEq)] pub struct AttributeHandle(pub u16); // Defines the maximum length of an ATT attribute value field. This is determined by the max packet // size (255) less the minimum number of bytes used by other fields in any packet.
const MAX_ATTRIBUTE_LEN: usize = 248; impl Debug for GattAttributeModified { #[cfg(feature = "ms")] fn fmt(&self, f: &mut Formatter) -> FmtResult { write!( f, "{{conn_handle: {:?}, attr_handle: {:?}, offset: {}, continued: {}, data: {:?}}}", self.conn_handle, self.attr_handle, self.offset, self.continued, first_16(self.data()), ) } #[cfg(not(feature = "ms"))] fn fmt(&self, f: &mut Formatter) -> FmtResult { write!( f, "{{conn_handle: {:?}, attr_handle: {:?}, data: {:?}}}", self.conn_handle, self.attr_handle, first_16(self.data()), ) } } #[cfg(feature = "ms")] fn to_gatt_attribute_modified( buffer: &[u8], ) -> Result<GattAttributeModified, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 9); let data_len = buffer[6] as usize; require_len!(buffer, 9 + data_len); let mut data = [0; MAX_ATTRIBUTE_LEN]; data[..data_len].copy_from_slice(&buffer[9..]); let offset_field = LittleEndian::read_u16(&buffer[7..]); Ok(GattAttributeModified { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), attr_handle: AttributeHandle(LittleEndian::read_u16(&buffer[4..])), offset: (offset_field & 0x7FFF) as usize, continued: (offset_field & 0x8000) > 0, data_len, data_buf: data, }) } #[cfg(not(feature = "ms"))] fn to_gatt_attribute_modified( buffer: &[u8], ) -> Result<GattAttributeModified, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 7); let data_len = buffer[6] as usize; require_len!(buffer, 7 + data_len); let mut data = [0; MAX_ATTRIBUTE_LEN]; data[..data_len].copy_from_slice(&buffer[7..]); Ok(GattAttributeModified { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), attr_handle: AttributeHandle(LittleEndian::read_u16(&buffer[4..])), data_len, data_buf: data, }) } /// This event is generated in response to an Exchange MTU request. #[derive(Copy, Clone, Debug)] pub struct AttExchangeMtuResponse { /// The connection handle related to the response. pub conn_handle: ConnectionHandle, /// Attribute server receive MTU size. pub server_rx_mtu: usize, } fn to_att_exchange_mtu_resp( buffer: &[u8], ) -> Result<AttExchangeMtuResponse, hci::event::Error<BlueNRGError>> { require_len!(buffer, 7); Ok(AttExchangeMtuResponse { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), server_rx_mtu: LittleEndian::read_u16(&buffer[5..]) as usize, }) } /// This event is generated in response to a Find Information Request. See Find Information Response /// in Bluetooth Core v4.0 spec. #[derive(Copy, Clone, Debug)] pub struct AttFindInformationResponse { /// The connection handle related to the response pub conn_handle: ConnectionHandle, /// The Find Information Response shall have complete handle-UUID pairs. Such pairs shall not be /// split across response packets; this also implies that a handle-UUID pair shall fit into a /// single response packet. The handle-UUID pairs shall be returned in ascending order of /// attribute handles. handle_uuid_pairs: HandleUuidPairs, } impl AttFindInformationResponse { /// The Find Information Response shall have complete handle-UUID pairs. Such pairs shall not be /// split across response packets; this also implies that a handle-UUID pair shall fit into a /// single response packet. The handle-UUID pairs shall be returned in ascending order of /// attribute handles.
pub fn handle_uuid_pair_iter(&self) -> HandleUuidPairIterator { match self.handle_uuid_pairs { HandleUuidPairs::Format16(count, ref data) => { HandleUuidPairIterator::Format16(HandleUuid16PairIterator { data, count, next_index: 0, }) } HandleUuidPairs::Format128(count, ref data) => { HandleUuidPairIterator::Format128(HandleUuid128PairIterator { data, count, next_index: 0, }) } } } } // Assuming a maximum HCI packet size of 255, these are the maximum number of handle-UUID pairs for // each format that can be in one packet. Formats cannot be mixed in a single packet. // // Packets have 6 other bytes of data preceding the handle-UUID pairs. // // max = floor((255 - 6) / pair_length) const MAX_FORMAT16_PAIR_COUNT: usize = 62; const MAX_FORMAT128_PAIR_COUNT: usize = 13; /// One format of the handle-UUID pairs in the [`AttFindInformationResponse`] event. The UUIDs are /// 16 bits. #[derive(Copy, Clone, Debug)] pub struct HandleUuid16Pair { /// Attribute handle pub handle: AttributeHandle, /// Attribute UUID pub uuid: Uuid16, } /// One format of the handle-UUID pairs in the [`AttFindInformationResponse`] event. The UUIDs are /// 128 bits. #[derive(Copy, Clone, Debug)] pub struct HandleUuid128Pair { /// Attribute handle pub handle: AttributeHandle, /// Attribute UUID pub uuid: Uuid128, } /// Newtype for the 16-bit UUID buffer. #[derive(Copy, Clone, Debug, PartialEq)] pub struct Uuid16(pub u16); /// Newtype for the 128-bit UUID buffer. #[derive(Copy, Clone, Debug, PartialEq)] pub struct Uuid128(pub [u8; 16]); #[derive(Copy, Clone)] enum HandleUuidPairs { Format16(usize, [HandleUuid16Pair; MAX_FORMAT16_PAIR_COUNT]), Format128(usize, [HandleUuid128Pair; MAX_FORMAT128_PAIR_COUNT]), } impl Debug for HandleUuidPairs { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!(f, "{{")?; match *self { HandleUuidPairs::Format16(count, pairs) => { for handle_uuid_pair in &pairs[..count] { write!( f, "{{{:?}, {:?}}}", handle_uuid_pair.handle, handle_uuid_pair.uuid )? } } HandleUuidPairs::Format128(count, pairs) => { for handle_uuid_pair in &pairs[..count] { write!( f, "{{{:?}, {:?}}}", handle_uuid_pair.handle, handle_uuid_pair.uuid )? } } } write!(f, "}}") } } /// Possible iterators over handle-UUID pairs that can be returned by the [ATT find information /// response](AttFindInformationResponse). All pairs from the same event have the same format. pub enum HandleUuidPairIterator<'a> { /// The event contains 16-bit UUIDs. Format16(HandleUuid16PairIterator<'a>), /// The event contains 128-bit UUIDs. Format128(HandleUuid128PairIterator<'a>), } /// Iterator over handle-UUID pairs for 16-bit UUIDs. pub struct HandleUuid16PairIterator<'a> { data: &'a [HandleUuid16Pair; MAX_FORMAT16_PAIR_COUNT], count: usize, next_index: usize, } impl<'a> Iterator for HandleUuid16PairIterator<'a> { type Item = HandleUuid16Pair; fn next(&mut self) -> Option<Self::Item> { if self.next_index >= self.count { return None; } let index = self.next_index; self.next_index += 1; Some(self.data[index]) } } /// Iterator over handle-UUID pairs for 128-bit UUIDs.
pub struct HandleUuid128PairIterator<'a> { data: &'a [HandleUuid128Pair; MAX_FORMAT128_PAIR_COUNT], count: usize, next_index: usize, } impl<'a> Iterator for HandleUuid128PairIterator<'a> { type Item = HandleUuid128Pair; fn next(&mut self) -> Option<Self::Item> { if self.next_index >= self.count { return None; } let index = self.next_index; self.next_index += 1; Some(self.data[index]) } } fn to_att_find_information_response( buffer: &[u8], ) -> Result<AttFindInformationResponse, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 6); let data_len = buffer[4] as usize; require_len!(buffer, 5 + data_len); Ok(AttFindInformationResponse { conn_handle: to_conn_handle(buffer)?, handle_uuid_pairs: match buffer[5] { 1 => to_handle_uuid16_pairs(&buffer[6..]).map_err(hci::event::Error::Vendor)?, 2 => to_handle_uuid128_pairs(&buffer[6..]).map_err(hci::event::Error::Vendor)?, _ => { return Err(hci::event::Error::Vendor( BlueNRGError::BadAttFindInformationResponseFormat(buffer[5]), )); } }, }) } fn to_handle_uuid16_pairs(buffer: &[u8]) -> Result<HandleUuidPairs, BlueNRGError> { const PAIR_LEN: usize = 4; if buffer.len() % PAIR_LEN != 0 { return Err(BlueNRGError::AttFindInformationResponsePartialPair16); } let count = buffer.len() / PAIR_LEN; let mut pairs = [HandleUuid16Pair { handle: AttributeHandle(0), uuid: Uuid16(0), }; MAX_FORMAT16_PAIR_COUNT]; for (i, pair) in pairs.iter_mut().enumerate().take(count) { let index = i * PAIR_LEN; pair.handle = AttributeHandle(LittleEndian::read_u16(&buffer[index..])); pair.uuid = Uuid16(LittleEndian::read_u16(&buffer[2 + index..])); } Ok(HandleUuidPairs::Format16(count, pairs)) } fn to_handle_uuid128_pairs(buffer: &[u8]) -> Result<HandleUuidPairs, BlueNRGError> { const PAIR_LEN: usize = 18; if buffer.len() % PAIR_LEN != 0 { return Err(BlueNRGError::AttFindInformationResponsePartialPair128); } let count = buffer.len() / PAIR_LEN; let mut pairs = [HandleUuid128Pair { handle: AttributeHandle(0), uuid: Uuid128([0; 16]), }; MAX_FORMAT128_PAIR_COUNT]; for (i, pair) in pairs.iter_mut().enumerate().take(count) { let index = i * PAIR_LEN; let next_index = (i + 1) * PAIR_LEN; pair.handle = AttributeHandle(LittleEndian::read_u16(&buffer[index..])); pair.uuid.0.copy_from_slice(&buffer[2 + index..next_index]); } Ok(HandleUuidPairs::Format128(count, pairs)) } /// This event is generated in response to a Find By Type Value Request. #[derive(Copy, Clone)] pub struct AttFindByTypeValueResponse { /// The connection handle related to the response. pub conn_handle: ConnectionHandle, /// The number of valid pairs that follow. handle_pair_count: usize, /// Handles Information List as defined in Bluetooth Core v4.1 spec. handles: [HandleInfoPair; MAX_HANDLE_INFO_PAIR_COUNT], } impl AttFindByTypeValueResponse { /// Returns an iterator over the Handles Information List as defined in Bluetooth Core v4.1 /// spec. pub fn handle_pairs_iter(&self) -> HandleInfoPairIterator { HandleInfoPairIterator { event: &self, next_index: 0, } } } impl Debug for AttFindByTypeValueResponse { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!(f, "{{.conn_handle = {:?}, ", self.conn_handle)?; for handle_pair in self.handle_pairs_iter() { write!(f, "{:?}", handle_pair)?; } write!(f, "}}") } } // Assuming a maximum HCI packet size of 255, these are the maximum number of handle pairs that can // be in one packet. // // Packets have 5 other bytes of data preceding the handle-UUID pairs. 
// // max = floor((255 - 5) / 4) const MAX_HANDLE_INFO_PAIR_COUNT: usize = 62; /// Simple container for the handle information returned in [`AttFindByTypeValueResponse`]. #[derive(Copy, Clone, Debug)] pub struct HandleInfoPair { /// Attribute handle pub attribute: AttributeHandle, /// Group End handle pub group_end: GroupEndHandle, } /// Newtype for Group End handles #[derive(Copy, Clone, Debug, PartialEq)] pub struct GroupEndHandle(pub u16); /// Iterator into valid [`HandleInfoPair`] structs returned in the [ATT Find By Type Value /// Response](AttFindByTypeValueResponse) event. pub struct HandleInfoPairIterator<'a> { event: &'a AttFindByTypeValueResponse, next_index: usize, } impl<'a> Iterator for HandleInfoPairIterator<'a> { type Item = HandleInfoPair; fn next(&mut self) -> Option<Self::Item> { if self.next_index >= self.event.handle_pair_count { return None; } let index = self.next_index; self.next_index += 1; Some(self.event.handles[index]) } } fn to_att_find_by_value_type_response( buffer: &[u8], ) -> Result<AttFindByTypeValueResponse, hci::event::Error<BlueNRGError>> { const PAIR_LEN: usize = 4; require_len_at_least!(buffer, 5); let data_len = buffer[4] as usize; require_len!(buffer, 5 + data_len); let pair_buffer = &buffer[5..]; if pair_buffer.len() % PAIR_LEN != 0 { return Err(hci::event::Error::Vendor( BlueNRGError::AttFindByTypeValuePartial, )); } let count = pair_buffer.len() / PAIR_LEN; let mut pairs = [HandleInfoPair { attribute: AttributeHandle(0), group_end: GroupEndHandle(0), }; MAX_HANDLE_INFO_PAIR_COUNT]; for (i, pair) in pairs.iter_mut().enumerate().take(count) { let index = i * PAIR_LEN; pair.attribute = AttributeHandle(LittleEndian::read_u16(&pair_buffer[index..])); pair.group_end = GroupEndHandle(LittleEndian::read_u16(&pair_buffer[2 + index..])); } Ok(AttFindByTypeValueResponse { conn_handle: to_conn_handle(buffer)?, handle_pair_count: count, handles: pairs, }) } /// This event is generated in response to a Read By Type Request. #[derive(Copy, Clone)] pub struct AttReadByTypeResponse { /// The connection handle related to the response. pub conn_handle: ConnectionHandle, // Number of valid bytes in `handle_value_pair_buf` data_len: usize, // Length of each value in `handle_value_pair_buf` value_len: usize, // Raw data of the response. Contains 2 octets for the attribute handle followed by `value_len` // octets of value data. These pairs repeat for `data_len` bytes. handle_value_pair_buf: [u8; MAX_HANDLE_VALUE_PAIR_BUF_LEN], } // The maximum amount of data in the buffer is the max HCI packet size (255) less the other data in // the packet. const MAX_HANDLE_VALUE_PAIR_BUF_LEN: usize = 249; impl Debug for AttReadByTypeResponse { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!(f, "{{.conn_handle = {:?}, ", self.conn_handle)?; for handle_value_pair in self.handle_value_pair_iter() { write!( f, "{{handle: {:?}, value: {:?}}}", handle_value_pair.handle, first_16(handle_value_pair.value) )?; } write!(f, "}}") } } impl AttReadByTypeResponse { /// Return an iterator over all valid handle-value pairs returned with the response. pub fn handle_value_pair_iter(&self) -> HandleValuePairIterator { HandleValuePairIterator { event: &self, index: 0, } } } /// Iterator over the valid handle-value pairs returned with the [ATT Read by Type /// response](AttReadByTypeResponse). 
pub struct HandleValuePairIterator<'a> { event: &'a AttReadByTypeResponse, index: usize, } impl<'a> Iterator for HandleValuePairIterator<'a> { type Item = HandleValuePair<'a>; fn next(&mut self) -> Option<Self::Item> { if self.index >= self.event.data_len { return None; } let handle_index = self.index; let value_index = self.index + 2; self.index += 2 + self.event.value_len; let next_index = self.index; Some(HandleValuePair { handle: AttributeHandle(LittleEndian::read_u16( &self.event.handle_value_pair_buf[handle_index..], )), value: &self.event.handle_value_pair_buf[value_index..next_index], }) } } /// A single handle-value pair returned by the [ATT Read by Type response](AttReadByTypeResponse). pub struct HandleValuePair<'a> { /// Attribute handle pub handle: AttributeHandle, /// Attribute value. The caller must interpret the value correctly, depending on the expected /// type of the attribute. pub value: &'a [u8], } fn to_att_read_by_type_response( buffer: &[u8], ) -> Result<AttReadByTypeResponse, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 6); let data_len = buffer[4] as usize; require_len!(buffer, 5 + data_len); let handle_value_pair_len = buffer[5] as usize; let handle_value_pair_buf = &buffer[6..]; if handle_value_pair_buf.len() % handle_value_pair_len != 0 { return Err(hci::event::Error::Vendor( BlueNRGError::AttReadByTypeResponsePartial, )); } let mut full_handle_value_pair_buf = [0; MAX_HANDLE_VALUE_PAIR_BUF_LEN]; full_handle_value_pair_buf[..handle_value_pair_buf.len()] .copy_from_slice(&handle_value_pair_buf); Ok(AttReadByTypeResponse { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), data_len: handle_value_pair_buf.len(), value_len: handle_value_pair_len - 2, handle_value_pair_buf: full_handle_value_pair_buf, }) } /// This event is generated in response to a Read Request. #[derive(Copy, Clone)] pub struct AttReadResponse { /// The connection handle related to the response. pub conn_handle: ConnectionHandle, /// The number of valid bytes in the value buffer. value_len: usize, /// Buffer containing the value data. value_buf: [u8; MAX_READ_RESPONSE_LEN], } // The maximum amount of data in the buffer is the max HCI packet size (255) less the other data in // the packet. const MAX_READ_RESPONSE_LEN: usize = 250; impl Debug for AttReadResponse { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!( f, "{{.conn_handle = {:?}, value = {:?}}}", self.conn_handle, first_16(self.value()) ) } } impl AttReadResponse { /// Returns the valid part of the value data. pub fn value(&self) -> &[u8] { &self.value_buf[..self.value_len] } } fn to_att_read_response(buffer: &[u8]) -> Result<AttReadResponse, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 5); let data_len = buffer[4] as usize; require_len!(buffer, 5 + data_len); let mut value_buf = [0; MAX_READ_RESPONSE_LEN]; value_buf[..data_len].copy_from_slice(&buffer[5..]); Ok(AttReadResponse { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), value_len: data_len, value_buf, }) } /// This event is generated in response to a Read By Group Type Request. See the Bluetooth Core v4.1 /// spec, Vol 3, section 3.4.4.9 and 3.4.4.10. #[derive(Copy, Clone)] pub struct AttReadByGroupTypeResponse { /// The connection handle related to the response. pub conn_handle: ConnectionHandle, // Number of valid bytes in `attribute_data_buf` data_len: usize, // Length of the attribute data group in `attribute_data_buf`, including the attribute and group // end handles. 
attribute_group_len: usize, // List of attribute data which is a repetition of: // 1. 2 octets for attribute handle. // 2. 2 octets for end group handle. // 3. (attribute_group_len - 4) octets for attribute value. attribute_data_buf: [u8; MAX_ATTRIBUTE_DATA_BUF_LEN], } // The maximum amount of data in the buffer is the max HCI packet size (255) less the other data in // the packet. const MAX_ATTRIBUTE_DATA_BUF_LEN: usize = 249; impl AttReadByGroupTypeResponse { /// Create and return an iterator for the attribute data returned with the response. pub fn attribute_data_iter(&self) -> AttributeDataIterator { AttributeDataIterator { event: self, next_index: 0, } } } impl Debug for AttReadByGroupTypeResponse { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!(f, "{{.conn_handle = {:?}, ", self.conn_handle)?; for attribute_data in self.attribute_data_iter() { write!( f, "{{.attribute_handle = {:?}, .group_end_handle = {:?}, .value = {:?}}}", attribute_data.attribute_handle, attribute_data.group_end_handle, first_16(attribute_data.value) )?; } write!(f, "}}") } } /// Iterator over the attribute data returned in the [`AttReadByGroupTypeResponse`]. pub struct AttributeDataIterator<'a> { event: &'a AttReadByGroupTypeResponse, next_index: usize, } impl<'a> Iterator for AttributeDataIterator<'a> { type Item = AttributeData<'a>; fn next(&mut self) -> Option<Self::Item> { if self.next_index >= self.event.data_len { return None; } let attr_handle_index = self.next_index; let group_end_index = 2 + attr_handle_index; let value_index = 2 + group_end_index; self.next_index += self.event.attribute_group_len; Some(AttributeData { attribute_handle: AttributeHandle(LittleEndian::read_u16( &self.event.attribute_data_buf[attr_handle_index..], )), group_end_handle: GroupEndHandle(LittleEndian::read_u16( &self.event.attribute_data_buf[group_end_index..], )), value: &self.event.attribute_data_buf[value_index..self.next_index], }) } } /// Attribute data returned in the [`AttReadByGroupTypeResponse`] event. pub struct AttributeData<'a> { /// Attribute handle pub attribute_handle: AttributeHandle, /// Group end handle pub group_end_handle: GroupEndHandle, /// Attribute value pub value: &'a [u8], } fn to_att_read_by_group_type_response( buffer: &[u8], ) -> Result<AttReadByGroupTypeResponse, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 6); let data_len = buffer[4] as usize; require_len!(buffer, 5 + data_len); let attribute_group_len = buffer[5] as usize; if buffer[6..].len() % attribute_group_len != 0 { return Err(hci::event::Error::Vendor( BlueNRGError::AttReadByGroupTypeResponsePartial, )); } let mut attribute_data_buf = [0; MAX_ATTRIBUTE_DATA_BUF_LEN]; attribute_data_buf[..data_len - 1].copy_from_slice(&buffer[6..]); Ok(AttReadByGroupTypeResponse { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), data_len: data_len - 1, // lose 1 byte to attribute_group_len attribute_group_len, attribute_data_buf, }) } /// This event is generated in response to a Prepare Write Request. See the Bluetooth Core v4.1 /// spec, Vol 3, Part F, section 3.4.6.1 and 3.4.6.2 #[derive(Copy, Clone)] pub struct AttPrepareWriteResponse { /// The connection handle related to the response. pub conn_handle: ConnectionHandle, /// The handle of the attribute to be written. pub attribute_handle: AttributeHandle, /// The offset of the first octet to be written. 
pub offset: usize, /// Number of valid bytes in |value_buf| value_len: usize, value_buf: [u8; MAX_WRITE_RESPONSE_VALUE_LEN], } // The maximum amount of data in the buffer is the max HCI packet size (255) less the other data in // the packet. const MAX_WRITE_RESPONSE_VALUE_LEN: usize = 246; impl Debug for AttPrepareWriteResponse { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!( f, "{{.conn_handle = {:?}, .attribute_handle = {:?}, .offset = {}, .value = {:?}}}", self.conn_handle, self.attribute_handle, self.offset, first_16(self.value()) ) } } impl AttPrepareWriteResponse { /// Returns the partial value of the attribute to be written. pub fn value(&self) -> &[u8] { &self.value_buf[..self.value_len] } } fn to_att_prepare_write_response( buffer: &[u8], ) -> Result<AttPrepareWriteResponse, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 9); let data_len = buffer[4] as usize; require_len!(buffer, 5 + data_len); let value_len = data_len - 4; let mut value_buf = [0; MAX_WRITE_RESPONSE_VALUE_LEN]; value_buf[..value_len].copy_from_slice(&buffer[9..]); Ok(AttPrepareWriteResponse { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), attribute_handle: AttributeHandle(LittleEndian::read_u16(&buffer[5..])), offset: LittleEndian::read_u16(&buffer[7..]) as usize, value_len, value_buf, }) } /// Defines the attribute value returned by a [GATT Indication](BlueNRGEvent::GattIndication) or /// [GATT Notification](BlueNRGEvent::GattNotification) event. #[derive(Copy, Clone)] pub struct AttributeValue { /// The connection handle related to the event. pub conn_handle: ConnectionHandle, /// The handle of the attribute. pub attribute_handle: AttributeHandle, // Number of valid bytes in value_buf value_len: usize, // Current value of the attribute. Only the first value_len bytes are valid. value_buf: [u8; MAX_ATTRIBUTE_VALUE_LEN], } // The maximum amount of data in the buffer is the max HCI packet size (255) less the other data in // the packet. const MAX_ATTRIBUTE_VALUE_LEN: usize = 248; impl Debug for AttributeValue { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!( f, "{{.conn_handle = {:?}, .attribute_handle = {:?}, .value = {:?}}}", self.conn_handle, self.attribute_handle, first_16(self.value()) ) } } impl AttributeValue { /// Returns the current value of the attribute. 
pub fn value(&self) -> &[u8] { &self.value_buf[..self.value_len] } } fn to_attribute_value(buffer: &[u8]) -> Result<AttributeValue, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 7); let data_len = buffer[4] as usize; require_len!(buffer, 5 + data_len); let value_len = data_len - 2; let mut value_buf = [0; MAX_ATTRIBUTE_VALUE_LEN]; value_buf[..value_len].copy_from_slice(&buffer[7..]); Ok(AttributeValue { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), attribute_handle: AttributeHandle(LittleEndian::read_u16(&buffer[5..])), value_len, value_buf, }) } fn to_write_permit_request( buffer: &[u8], ) -> Result<AttributeValue, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 7); let data_len = buffer[6] as usize; require_len!(buffer, 7 + data_len); let value_len = data_len; let mut value_buf = [0; MAX_ATTRIBUTE_VALUE_LEN]; value_buf[..value_len].copy_from_slice(&buffer[7..]); Ok(AttributeValue { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), attribute_handle: AttributeHandle(LittleEndian::read_u16(&buffer[4..])), value_len, value_buf, }) } /// This event is generated when a GATT client procedure completes, either successfully or with an /// error. #[derive(Copy, Clone, Debug)] pub struct GattProcedureComplete { /// The connection handle for which the GATT procedure has completed. pub conn_handle: ConnectionHandle, /// Indicates whether the procedure completed with [error](GattProcedureStatus::Failed) or was /// [successful](GattProcedureStatus::Success). pub status: GattProcedureStatus, } /// Allowed status codes for the [GATT Procedure Complete](BlueNRGEvent::GattProcedureComplete) /// event. #[derive(Copy, Clone, Debug, PartialEq)] pub enum GattProcedureStatus { /// BLE Status Success Success, /// BLE Status Failed Failed, } impl TryFrom<u8> for GattProcedureStatus { type Error = BlueNRGError; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 0x00 => Ok(GattProcedureStatus::Success), 0x41 => Ok(GattProcedureStatus::Failed), _ => Err(BlueNRGError::BadGattProcedureStatus(value)), } } } fn to_gatt_procedure_complete( buffer: &[u8], ) -> Result<GattProcedureComplete, hci::event::Error<BlueNRGError>> { require_len!(buffer, 6); Ok(GattProcedureComplete { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), status: buffer[5].try_into().map_err(hci::event::Error::Vendor)?, }) } /// The Error Response is used to state that a given request cannot be performed, and to provide the /// reason. See the Bluetooth Core Specification, v4.1, Vol 3, Part F, Section 3.4.1.1. #[derive(Copy, Clone, Debug)] pub struct AttErrorResponse { /// The connection handle related to the event. pub conn_handle: ConnectionHandle, /// The request that generated this error response. pub request: AttRequest, /// The attribute handle that generated this error response. pub attribute_handle: AttributeHandle, /// The reason why the request has generated an error response. pub error: AttError, } /// Potential error codes for the [ATT Error Response](BlueNRGEvent::AttErrorResponse). See Table /// 3.3 in the Bluetooth Core Specification, v4.1, Vol 3, Part F, Section 3.4.1.1 and The Bluetooth /// Core Specification Supplement, Table 1.1. #[repr(u8)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum AttError { /// The attribute handle given was not valid on this server. InvalidHandle = 0x01, /// The attribute cannot be read. ReadNotPermitted = 0x02, /// The attribute cannot be written.
WriteNotPermitted = 0x03, /// The attribute PDU was invalid. InvalidPdu = 0x04, /// The attribute requires authentication before it can be read or written. InsufficientAuthentication = 0x05, /// Attribute server does not support the request received from the client. RequestNotSupported = 0x06, /// Offset specified was past the end of the attribute. InvalidOffset = 0x07, /// The attribute requires authorization before it can be read or written. InsufficientAuthorization = 0x08, /// Too many prepare writes have been queued. PrepareQueueFull = 0x09, /// No attribute found within the given attribute handle range. AttributeNotFound = 0x0A, /// The attribute cannot be read or written using the Read Blob Request. AttributeNotLong = 0x0B, /// The Encryption Key Size used for encrypting this link is insufficient. InsufficientEncryptionKeySize = 0x0C, /// The attribute value length is invalid for the operation. InvalidAttributeValueLength = 0x0D, /// The attribute request that was requested has encountered an error that was unlikely, and /// therefore could not be completed as requested. UnlikelyError = 0x0E, /// The attribute requires encryption before it can be read or written. InsufficientEncryption = 0x0F, /// The attribute type is not a supported grouping attribute as defined by a higher layer /// specification. UnsupportedGroupType = 0x10, /// Insufficient Resources to complete the request. InsufficientResources = 0x11, /// Application error code defined by a higher layer specification. ApplicationError0x80 = 0x80, /// Application error code defined by a higher layer specification. ApplicationError0x81 = 0x81, /// Application error code defined by a higher layer specification. ApplicationError0x82 = 0x82, /// Application error code defined by a higher layer specification. ApplicationError0x83 = 0x83, /// Application error code defined by a higher layer specification. ApplicationError0x84 = 0x84, /// Application error code defined by a higher layer specification. ApplicationError0x85 = 0x85, /// Application error code defined by a higher layer specification. ApplicationError0x86 = 0x86, /// Application error code defined by a higher layer specification. ApplicationError0x87 = 0x87, /// Application error code defined by a higher layer specification. ApplicationError0x88 = 0x88, /// Application error code defined by a higher layer specification. ApplicationError0x89 = 0x89, /// Application error code defined by a higher layer specification. ApplicationError0x8A = 0x8A, /// Application error code defined by a higher layer specification. ApplicationError0x8B = 0x8B, /// Application error code defined by a higher layer specification. ApplicationError0x8C = 0x8C, /// Application error code defined by a higher layer specification. ApplicationError0x8D = 0x8D, /// Application error code defined by a higher layer specification. ApplicationError0x8E = 0x8E, /// Application error code defined by a higher layer specification. ApplicationError0x8F = 0x8F, /// Application error code defined by a higher layer specification. ApplicationError0x90 = 0x90, /// Application error code defined by a higher layer specification. ApplicationError0x91 = 0x91, /// Application error code defined by a higher layer specification. ApplicationError0x92 = 0x92, /// Application error code defined by a higher layer specification. ApplicationError0x93 = 0x93, /// Application error code defined by a higher layer specification. ApplicationError0x94 = 0x94, /// Application error code defined by a higher layer specification. 
ApplicationError0x95 = 0x95, /// Application error code defined by a higher layer specification. ApplicationError0x96 = 0x96, /// Application error code defined by a higher layer specification. ApplicationError0x97 = 0x97, /// Application error code defined by a higher layer specification. ApplicationError0x98 = 0x98, /// Application error code defined by a higher layer specification. ApplicationError0x99 = 0x99, /// Application error code defined by a higher layer specification. ApplicationError0x9A = 0x9A, /// Application error code defined by a higher layer specification. ApplicationError0x9B = 0x9B, /// Application error code defined by a higher layer specification. ApplicationError0x9C = 0x9C, /// Application error code defined by a higher layer specification. ApplicationError0x9D = 0x9D, /// Application error code defined by a higher layer specification. ApplicationError0x9E = 0x9E, /// Application error code defined by a higher layer specification. ApplicationError0x9F = 0x9F, /// The requested write operation cannot be fulfilled for reasons other than permissions. WriteRequestRejected = 0xFC, /// A Client Characteristic Configuration descriptor is not configured according to the /// requirements of the profile or service. ClientCharacteristicConfigurationDescriptorImproperlyConfigured = 0xFD, /// A profile or service request cannot be serviced because an operation that has been /// previously triggered is still in progress. ProcedureAlreadyInProgress = 0xFE, /// An attribute value is out of range as defined by a profile or service specification. OutOfRange = 0xFF, } impl TryFrom<u8> for AttError { type Error = u8; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 0x01 => Ok(AttError::InvalidHandle), 0x02 => Ok(AttError::ReadNotPermitted), 0x03 => Ok(AttError::WriteNotPermitted), 0x04 => Ok(AttError::InvalidPdu), 0x05 => Ok(AttError::InsufficientAuthentication), 0x06 => Ok(AttError::RequestNotSupported), 0x07 => Ok(AttError::InvalidOffset), 0x08 => Ok(AttError::InsufficientAuthorization), 0x09 => Ok(AttError::PrepareQueueFull), 0x0A => Ok(AttError::AttributeNotFound), 0x0B => Ok(AttError::AttributeNotLong), 0x0C => Ok(AttError::InsufficientEncryptionKeySize), 0x0D => Ok(AttError::InvalidAttributeValueLength), 0x0E => Ok(AttError::UnlikelyError), 0x0F => Ok(AttError::InsufficientEncryption), 0x10 => Ok(AttError::UnsupportedGroupType), 0x11 => Ok(AttError::InsufficientResources), 0x80 => Ok(AttError::ApplicationError0x80), 0x81 => Ok(AttError::ApplicationError0x81), 0x82 => Ok(AttError::ApplicationError0x82), 0x83 => Ok(AttError::ApplicationError0x83), 0x84 => Ok(AttError::ApplicationError0x84), 0x85 => Ok(AttError::ApplicationError0x85), 0x86 => Ok(AttError::ApplicationError0x86), 0x87 => Ok(AttError::ApplicationError0x87), 0x88 => Ok(AttError::ApplicationError0x88), 0x89 => Ok(AttError::ApplicationError0x89), 0x8A => Ok(AttError::ApplicationError0x8A), 0x8B => Ok(AttError::ApplicationError0x8B), 0x8C => Ok(AttError::ApplicationError0x8C), 0x8D => Ok(AttError::ApplicationError0x8D), 0x8E => Ok(AttError::ApplicationError0x8E), 0x8F => Ok(AttError::ApplicationError0x8F), 0x90 => Ok(AttError::ApplicationError0x90), 0x91 => Ok(AttError::ApplicationError0x91), 0x92 => Ok(AttError::ApplicationError0x92), 0x93 => Ok(AttError::ApplicationError0x93), 0x94 => Ok(AttError::ApplicationError0x94), 0x95 => Ok(AttError::ApplicationError0x95), 0x96 => Ok(AttError::ApplicationError0x96), 0x97 => Ok(AttError::ApplicationError0x97), 0x98 => Ok(AttError::ApplicationError0x98), 0x99 => 
Ok(AttError::ApplicationError0x99), 0x9A => Ok(AttError::ApplicationError0x9A), 0x9B => Ok(AttError::ApplicationError0x9B), 0x9C => Ok(AttError::ApplicationError0x9C), 0x9D => Ok(AttError::ApplicationError0x9D), 0x9E => Ok(AttError::ApplicationError0x9E), 0x9F => Ok(AttError::ApplicationError0x9F), 0xFC => Ok(AttError::WriteRequestRejected), 0xFD => Ok(AttError::ClientCharacteristicConfigurationDescriptorImproperlyConfigured), 0xFE => Ok(AttError::ProcedureAlreadyInProgress), 0xFF => Ok(AttError::OutOfRange), _ => Err(value), } } } /// Possible ATT requests. See Table 3.37 in the Bluetooth Core Spec v4.1, Vol 3, Part F, Section /// 3.4.8. #[repr(u8)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum AttRequest { /// Section 3.4.1.1 ErrorResponse = 0x01, /// Section 3.4.2.1 ExchangeMtuRequest = 0x02, /// Section 3.4.2.2 ExchangeMtuResponse = 0x03, /// Section 3.4.3.1 FindInformationRequest = 0x04, /// Section 3.4.3.2 FindInformationResponse = 0x05, /// Section 3.4.3.3 FindByTypeValueRequest = 0x06, /// Section 3.4.3.4 FindByTypeValueResponse = 0x07, /// Section 3.4.4.1 ReadByTypeRequest = 0x08, /// Section 3.4.4.2 ReadByTypeResponse = 0x09, /// Section 3.4.4.3 ReadRequest = 0x0A, /// Section 3.4.4.4 ReadResponse = 0x0B, /// Section 3.4.4.5 ReadBlobRequest = 0x0C, /// Section 3.4.4.6 ReadBlobResponse = 0x0D, /// Section 3.4.4.7 ReadMultipleRequest = 0x0E, /// Section 3.4.4.8 ReadMultipleResponse = 0x0F, /// Section 3.4.4.9 ReadByGroupTypeRequest = 0x10, /// Section 3.4.4.10 ReadByGroupTypeResponse = 0x11, /// Section 3.4.5.1 WriteRequest = 0x12, /// Section 3.4.5.2 WriteResponse = 0x13, /// Section 3.4.5.3 WriteCommand = 0x52, /// Section 3.4.5.4 SignedWriteCommand = 0xD2, /// Section 3.4.6.1 PrepareWriteRequest = 0x16, /// Section 3.4.6.2 PrepareWriteResponse = 0x17, /// Section 3.4.6.3 ExecuteWriteRequest = 0x18, /// Section 3.4.6.4 ExecuteWriteResponse = 0x19, /// Section 3.4.7.1 HandleValueNotification = 0x1B, /// Section 3.4.7.2 HandleValueIndication = 0x1D, /// Section 3.4.7.3 HandleValueConfirmation = 0x1E, } impl TryFrom<u8> for AttRequest { type Error = BlueNRGError; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 0x01 => Ok(AttRequest::ErrorResponse), 0x02 => Ok(AttRequest::ExchangeMtuRequest), 0x03 => Ok(AttRequest::ExchangeMtuResponse), 0x04 => Ok(AttRequest::FindInformationRequest), 0x05 => Ok(AttRequest::FindInformationResponse), 0x06 => Ok(AttRequest::FindByTypeValueRequest), 0x07 => Ok(AttRequest::FindByTypeValueResponse), 0x08 => Ok(AttRequest::ReadByTypeRequest), 0x09 => Ok(AttRequest::ReadByTypeResponse), 0x0A => Ok(AttRequest::ReadRequest), 0x0B => Ok(AttRequest::ReadResponse), 0x0C => Ok(AttRequest::ReadBlobRequest), 0x0D => Ok(AttRequest::ReadBlobResponse), 0x0E => Ok(AttRequest::ReadMultipleRequest), 0x0F => Ok(AttRequest::ReadMultipleResponse), 0x10 => Ok(AttRequest::ReadByGroupTypeRequest), 0x11 => Ok(AttRequest::ReadByGroupTypeResponse), 0x12 => Ok(AttRequest::WriteRequest), 0x13 => Ok(AttRequest::WriteResponse), 0x52 => Ok(AttRequest::WriteCommand), 0xD2 => Ok(AttRequest::SignedWriteCommand), 0x16 => Ok(AttRequest::PrepareWriteRequest), 0x17 => Ok(AttRequest::PrepareWriteResponse), 0x18 => Ok(AttRequest::ExecuteWriteRequest), 0x19 => Ok(AttRequest::ExecuteWriteResponse), 0x1B => Ok(AttRequest::HandleValueNotification), 0x1D => Ok(AttRequest::HandleValueIndication), 0x1E => Ok(AttRequest::HandleValueConfirmation), _ => Err(BlueNRGError::BadAttRequestOpcode(value)), } } } fn to_att_error_response( buffer: &[u8], ) -> Result<AttErrorResponse, 
hci::event::Error<BlueNRGError>> { require_len!(buffer, 9); Ok(AttErrorResponse { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), request: buffer[5].try_into().map_err(hci::event::Error::Vendor)?, attribute_handle: AttributeHandle(LittleEndian::read_u16(&buffer[6..])), error: buffer[8] .try_into() .map_err(BlueNRGError::BadAttError) .map_err(hci::event::Error::Vendor)?, }) } /// This event is given to the application when a read request or read blob request is received by /// the server from the client. This event will be given to the application only if the event bit /// for this event generation is set when the characteristic was added. On receiving this event, the /// application can update the value of the handle if it desires and when done it has to use the /// [`allow_read`](crate::gatt::Commands::allow_read) command to indicate to the stack that it can /// send the response to the client. /// /// See the Bluetooth Core v4.1 spec, Vol 3, Part F, section 3.4.4. #[derive(Copy, Clone, Debug)] pub struct AttReadPermitRequest { /// Handle of the connection on which there was the request to read the attribute pub conn_handle: ConnectionHandle, /// The handle of the attribute that has been requested by the client to be read. pub attribute_handle: AttributeHandle, /// Contains the offset from which the read has been requested. pub offset: usize, } fn to_att_read_permit_request( buffer: &[u8], ) -> Result<AttReadPermitRequest, hci::event::Error<BlueNRGError>> { require_len!(buffer, 9); Ok(AttReadPermitRequest { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), attribute_handle: AttributeHandle(LittleEndian::read_u16(&buffer[4..])), offset: LittleEndian::read_u16(&buffer[7..]) as usize, }) } /// This event is given to the application when a read multiple request or read by type request is /// received by the server from the client. This event will be given to the application only if the /// event bit for this event generation is set when the characteristic was added. On receiving this /// event, the application can update the values of the handles if it desires and when done it has /// to send the `gatt_allow_read` command to indicate to the stack that it can send the response to /// the client. /// /// See the Bluetooth Core v4.1 spec, Vol 3, Part F, section 3.4.4. #[derive(Copy, Clone)] pub struct AttReadMultiplePermitRequest { /// Handle of the connection which requested to read the attribute. pub conn_handle: ConnectionHandle, /// Number of valid handles in `handles_buf` handles_len: usize, /// Attribute handles returned by the ATT Read Multiple Permit Request. Only the first /// `handles_len` handles are valid. handles_buf: [AttributeHandle; MAX_ATTRIBUTE_HANDLE_BUFFER_LEN], } // The maximum number of handles in the buffer is the max HCI packet size (255), less the other // data in the packet, divided by the length of an attribute handle (2): floor((255 - 5) / 2) = 125. const MAX_ATTRIBUTE_HANDLE_BUFFER_LEN: usize = 125; impl Debug for AttReadMultiplePermitRequest { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!( f, "{{.conn_handle = {:?}, .handles = {:?}}}", self.conn_handle, first_16(self.handles()) ) } } impl AttReadMultiplePermitRequest { /// Returns the valid attribute handles returned by the ATT Read Multiple Permit Request event.
pub fn handles(&self) -> &[AttributeHandle] { &self.handles_buf[..self.handles_len] } } fn to_att_read_multiple_permit_request( buffer: &[u8], ) -> Result<AttReadMultiplePermitRequest, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 5); let data_len = buffer[4] as usize; require_len!(buffer, 5 + data_len); if data_len % 2 != 0 { return Err(hci::event::Error::Vendor( BlueNRGError::AttReadMultiplePermitRequestPartial, )); } let handle_len = data_len / 2; let mut handles = [AttributeHandle(0); MAX_ATTRIBUTE_HANDLE_BUFFER_LEN]; for (i, handle) in handles.iter_mut().enumerate().take(handle_len) { let index = 5 + 2 * i; *handle = AttributeHandle(LittleEndian::read_u16(&buffer[index..])); } Ok(AttReadMultiplePermitRequest { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), handles_len: handle_len, handles_buf: handles, }) } /// This event is raised when the number of available TX buffers is above a threshold TH (TH = 2). /// The event will be given only if a previous ACI command returned with /// [`InsufficientResources`](AttError::InsufficientResources). #[cfg(feature = "ms")] #[derive(Copy, Clone, Debug)] pub struct GattTxPoolAvailable { /// Connection handle on which the GATT procedure is running. pub conn_handle: ConnectionHandle, /// Indicates the number of elements available in the attrTxPool List. pub available_buffers: usize, } #[cfg(feature = "ms")] fn to_gatt_tx_pool_available( buffer: &[u8], ) -> Result<GattTxPoolAvailable, hci::event::Error<BlueNRGError>> { require_len!(buffer, 6); Ok(GattTxPoolAvailable { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), available_buffers: LittleEndian::read_u16(&buffer[4..]) as usize, }) } /// This event is given to the application when a prepare write request is received by the server /// from the client. /// /// This event will be given to the application only if the event bit for this event generation is /// set when the characteristic was added. When this event is received, the application has to /// check whether the value being requested for write is allowed to be written and respond with the /// command `gatt_write_response`. Based on the response from the application, the attribute value /// will be modified by the stack. If the write is rejected by the application, then the value of /// the attribute will not be modified and an error response will be sent to the client, with the /// error code as specified by the application. #[cfg(feature = "ms")] #[derive(Copy, Clone)] pub struct AttPrepareWritePermitRequest { /// Connection handle on which the GATT procedure is running. pub conn_handle: ConnectionHandle, /// The handle of the attribute to be written. pub attribute_handle: AttributeHandle, /// The offset of the first octet to be written. pub offset: usize, // Number of valid bytes in `value_buf` value_len: usize, // The data to be written. Only the first `value_len` bytes are valid. value_buf: [u8; MAX_PREPARE_WRITE_PERMIT_REQ_VALUE_LEN], } // The maximum number of bytes in the buffer is the max HCI packet size (255) less the other data in // the packet. #[cfg(feature = "ms")] const MAX_PREPARE_WRITE_PERMIT_REQ_VALUE_LEN: usize = 246; #[cfg(feature = "ms")] impl Debug for AttPrepareWritePermitRequest { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!( f, "{{.conn_handle = {:?}, .attribute_handle = {:?}, .offset = {:?}, .value = {:?}}}", self.conn_handle, self.attribute_handle, self.offset, first_16(self.value()) ) } } #[cfg(feature = "ms")] impl AttPrepareWritePermitRequest { /// Returns the data to be written.
pub fn value(&self) -> &[u8] { &self.value_buf[..self.value_len] } } #[cfg(feature = "ms")] fn to_att_prepare_write_permit_request( buffer: &[u8], ) -> Result<AttPrepareWritePermitRequest, hci::event::Error<BlueNRGError>> { require_len_at_least!(buffer, 9); let data_len = buffer[8] as usize; require_len!(buffer, 9 + data_len); let mut value_buf = [0; MAX_PREPARE_WRITE_PERMIT_REQ_VALUE_LEN]; value_buf[..data_len].copy_from_slice(&buffer[9..]); Ok(AttPrepareWritePermitRequest { conn_handle: ConnectionHandle(LittleEndian::read_u16(&buffer[2..])), attribute_handle: AttributeHandle(LittleEndian::read_u16(&buffer[4..])), offset: LittleEndian::read_u16(&buffer[6..]) as usize, value_len: data_len, value_buf, }) }
/// This event is generated in response to an Execute Write Request. See the Bluetooth Core v4.1 /// spec, Vol 3, Part F, section 3.4.6.3 and 3.4.6.4 AttExecuteWriteResponse(ConnectionHandle),
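// Illustrative sketch (not part of the crate): two hypothetical helpers showing how
// an application might consume the ATT items defined above. `AttError::try_from` and
// `AttFindInformationResponse::handle_uuid_pair_iter` are the real APIs from this
// module; `TryFrom` is assumed to already be in scope, as it is for the
// `impl TryFrom<u8>` blocks above.
#[allow(dead_code)]
fn is_end_of_discovery(error_code: u8) -> bool {
    // `AttributeNotFound` normally just marks the end of a discovery procedure;
    // `try_from` echoes reserved/unknown codes back in the `Err` variant.
    matches!(AttError::try_from(error_code), Ok(AttError::AttributeNotFound))
}
#[allow(dead_code)]
fn count_discovered_pairs(response: &AttFindInformationResponse) -> usize {
    // Both formats yield (handle, UUID) pairs; only the UUID width differs.
    match response.handle_uuid_pair_iter() {
        HandleUuidPairIterator::Format16(iter) => iter.count(),
        HandleUuidPairIterator::Format128(iter) => iter.count(),
    }
}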
const_test.go
// Copyright 2019 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package plugin import ( "fmt" "testing" "github.com/stretchr/testify/require" ) func TestConstToString(t *testing.T) {
Schema: "Schema", Daemon: "Daemon", Uninitialized: "Uninitialized", Ready: "Ready", Dying: "Dying", Disable: "Disable", Connected: "Connected", Disconnect: "Disconnect", ChangeUser: "ChangeUser", PreAuth: "PreAuth", Reject: "Reject", ConnectionEvent(byte(15)): "", } for key, value := range kinds { require.Equal(t, value, key.String()) } }
t.Parallel() kinds := map[fmt.Stringer]string{ Audit: "Audit", Authentication: "Authentication",
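// Illustrative sketch (not part of the dataset): a minimal Rust analogue of the
// table-driven Stringer test above, pairing each value with its expected string
// form. All names here (`PluginState` and its variants) are hypothetical.
use std::fmt;

#[derive(Debug, Clone, Copy)]
enum PluginState {
    Uninitialized,
    Ready,
    Dying,
}

impl fmt::Display for PluginState {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            PluginState::Uninitialized => "Uninitialized",
            PluginState::Ready => "Ready",
            PluginState::Dying => "Dying",
        })
    }
}

#[test]
fn state_to_string() {
    // Mirror the Go test: map each constant to its expected String() output.
    let cases = [
        (PluginState::Uninitialized, "Uninitialized"),
        (PluginState::Ready, "Ready"),
        (PluginState::Dying, "Dying"),
    ];
    for (state, expected) in cases {
        assert_eq!(state.to_string(), expected);
    }
}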
setup.py
import os from setuptools import setup, find_packages def read(fn):
setup( name='litewql', version=__import__('litewql').VERSION, description='Lite web queries language', long_description=read('README.md'), long_description_content_type="text/markdown", author='Vadim Sharay', author_email='[email protected]', packages=find_packages(exclude=['tests']), zip_safe=False, include_package_data=True, install_requires=[ "regex" ], classifiers=[ 'Development Status :: 2 - Pre-Alpha', # 'Development Status :: 3 - Alpha', # 'Development Status :: 4 - Beta', # 'Development Status :: 5 - Production/Stable', # 'Development Status :: 6 - Mature', # 'Development Status :: 7 - Inactive', 'Intended Audience :: Developers', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Information Technology', 'Intended Audience :: Science/Research', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: BSD License', 'Operating System :: POSIX', 'Operating System :: MacOS', 'Operating System :: Unix', 'Programming Language :: Python', # 'Programming Language :: Python :: 2', # 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', "Programming Language :: Python :: Implementation :: PyPy3", 'Topic :: Software Development :: Libraries' ] )
path = os.path.join(os.path.dirname(__file__), fn) try: file = open(path, encoding='utf-8') except TypeError: file = open(path) return file.read()
msgs.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: peggy/v1/msgs.proto package types import ( context "context" fmt "fmt" github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" types "github.com/cosmos/cosmos-sdk/types" _ "github.com/gogo/protobuf/gogoproto" grpc1 "github.com/gogo/protobuf/grpc" proto "github.com/gogo/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" io "io" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // MsgSetOrchestratorAddress // this message allows validators to delegate their voting responsibilities // to a given key. This key is then used as an optional authentication method // for signing oracle claims // VALIDATOR // The validator field is a cosmosvaloper1... string (i.e. sdk.ValAddress) // that references a validator in the active set // ORCHESTRATOR // The orchestrator field is a cosmos1... string (i.e. sdk.AccAddress) that // references the key that is being delegated to // ETH_ADDRESS // This is a hex encoded 0x Ethereum public key that will be used by this validator // on Ethereum type MsgSetOrchestratorAddress struct { Validator string `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator,omitempty"` Orchestrator string `protobuf:"bytes,2,opt,name=orchestrator,proto3" json:"orchestrator,omitempty"` EthAddress string `protobuf:"bytes,3,opt,name=eth_address,json=ethAddress,proto3" json:"eth_address,omitempty"` } func (m *MsgSetOrchestratorAddress) Reset() { *m = MsgSetOrchestratorAddress{} } func (m *MsgSetOrchestratorAddress) String() string { return proto.CompactTextString(m) } func (*MsgSetOrchestratorAddress) ProtoMessage() {} func (*MsgSetOrchestratorAddress) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{0} } func (m *MsgSetOrchestratorAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgSetOrchestratorAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgSetOrchestratorAddress.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgSetOrchestratorAddress) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgSetOrchestratorAddress.Merge(m, src) } func (m *MsgSetOrchestratorAddress) XXX_Size() int { return m.Size() } func (m *MsgSetOrchestratorAddress) XXX_DiscardUnknown() { xxx_messageInfo_MsgSetOrchestratorAddress.DiscardUnknown(m) } var xxx_messageInfo_MsgSetOrchestratorAddress proto.InternalMessageInfo func (m *MsgSetOrchestratorAddress) GetValidator() string { if m != nil { return m.Validator } return "" } func (m *MsgSetOrchestratorAddress) GetOrchestrator() string { if m != nil { return m.Orchestrator } return "" } func (m *MsgSetOrchestratorAddress) GetEthAddress() string { if m != nil { return m.EthAddress } return "" } type MsgSetOrchestratorAddressResponse struct { } func (m *MsgSetOrchestratorAddressResponse) Reset() { *m =
MsgSetOrchestratorAddressResponse{} } func (m *MsgSetOrchestratorAddressResponse) String() string { return proto.CompactTextString(m) } func (*MsgSetOrchestratorAddressResponse) ProtoMessage() {} func (*MsgSetOrchestratorAddressResponse) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{1} } func (m *MsgSetOrchestratorAddressResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgSetOrchestratorAddressResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgSetOrchestratorAddressResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgSetOrchestratorAddressResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgSetOrchestratorAddressResponse.Merge(m, src) } func (m *MsgSetOrchestratorAddressResponse) XXX_Size() int { return m.Size() } func (m *MsgSetOrchestratorAddressResponse) XXX_DiscardUnknown() { xxx_messageInfo_MsgSetOrchestratorAddressResponse.DiscardUnknown(m) } var xxx_messageInfo_MsgSetOrchestratorAddressResponse proto.InternalMessageInfo // MsgValsetConfirm // this is the message sent by the validators when they wish to submit their // signatures over the validator set at a given block height. A validator must // first call MsgSetEthAddress to set their Ethereum address to be used for // signing. Then someone (anyone) must make a ValsetRequest; the request is // essentially a messaging mechanism to determine which block all validators // should submit signatures over. Finally, validators sign the validator set, // powers, and Ethereum addresses of the entire validator set at the height of a // ValsetRequest and submit that signature with this message.
// // If a sufficient number of validators (66% of voting power) (A) have set // Ethereum addresses and (B) submit ValsetConfirm messages with their // signatures, it is then possible for anyone to view these signatures in the // chain store and submit them to Ethereum to update the validator set // ------------- type MsgValsetConfirm struct { Nonce uint64 `protobuf:"varint,1,opt,name=nonce,proto3" json:"nonce,omitempty"` Orchestrator string `protobuf:"bytes,2,opt,name=orchestrator,proto3" json:"orchestrator,omitempty"` EthAddress string `protobuf:"bytes,3,opt,name=eth_address,json=ethAddress,proto3" json:"eth_address,omitempty"` Signature string `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` } func (m *MsgValsetConfirm) Reset() { *m = MsgValsetConfirm{} } func (m *MsgValsetConfirm) String() string { return proto.CompactTextString(m) } func (*MsgValsetConfirm) ProtoMessage() {} func (*MsgValsetConfirm) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{2} } func (m *MsgValsetConfirm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgValsetConfirm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgValsetConfirm.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgValsetConfirm) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgValsetConfirm.Merge(m, src) } func (m *MsgValsetConfirm) XXX_Size() int { return m.Size() } func (m *MsgValsetConfirm) XXX_DiscardUnknown() { xxx_messageInfo_MsgValsetConfirm.DiscardUnknown(m) } var xxx_messageInfo_MsgValsetConfirm proto.InternalMessageInfo func (m *MsgValsetConfirm) GetNonce() uint64 { if m != nil { return m.Nonce } return 0 } func (m *MsgValsetConfirm) GetOrchestrator() string { if m != nil { return m.Orchestrator } return "" } func (m *MsgValsetConfirm) GetEthAddress() string { if m != nil { return m.EthAddress } return "" } func (m *MsgValsetConfirm) GetSignature() string { if m != nil { return m.Signature } return "" } type MsgValsetConfirmResponse struct { } func (m *MsgValsetConfirmResponse) Reset() { *m = MsgValsetConfirmResponse{} } func (m *MsgValsetConfirmResponse) String() string { return proto.CompactTextString(m) } func (*MsgValsetConfirmResponse) ProtoMessage() {} func (*MsgValsetConfirmResponse) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{3} } func (m *MsgValsetConfirmResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgValsetConfirmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgValsetConfirmResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgValsetConfirmResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgValsetConfirmResponse.Merge(m, src) } func (m *MsgValsetConfirmResponse) XXX_Size() int { return m.Size() } func (m *MsgValsetConfirmResponse) XXX_DiscardUnknown() { xxx_messageInfo_MsgValsetConfirmResponse.DiscardUnknown(m) } var xxx_messageInfo_MsgValsetConfirmResponse proto.InternalMessageInfo // MsgSendToEth // This is the message that a user calls when they want to bridge an asset; // it will later be removed when it is included in a batch and successfully // submitted. Tokens are removed from the user's balance immediately // ------------- // AMOUNT: // the coin
to send across the bridge, note the restriction that this is a // single coin, not a set of coins as is normal in other Cosmos messages // FEE: // the fee paid for the bridge, distinct from the fee paid to the chain to // actually send this message in the first place. So a successful send has // two layers of fees for the user type MsgSendToEth struct { Sender string `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"` EthDest string `protobuf:"bytes,2,opt,name=eth_dest,json=ethDest,proto3" json:"eth_dest,omitempty"` Amount types.Coin `protobuf:"bytes,3,opt,name=amount,proto3" json:"amount"` BridgeFee types.Coin `protobuf:"bytes,4,opt,name=bridge_fee,json=bridgeFee,proto3" json:"bridge_fee"` } func (m *MsgSendToEth) Reset() { *m = MsgSendToEth{} } func (m *MsgSendToEth) String() string { return proto.CompactTextString(m) } func (*MsgSendToEth) ProtoMessage() {} func (*MsgSendToEth) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{4} } func (m *MsgSendToEth) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgSendToEth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgSendToEth.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgSendToEth) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgSendToEth.Merge(m, src) } func (m *MsgSendToEth) XXX_Size() int { return m.Size() } func (m *MsgSendToEth) XXX_DiscardUnknown() { xxx_messageInfo_MsgSendToEth.DiscardUnknown(m) } var xxx_messageInfo_MsgSendToEth proto.InternalMessageInfo func (m *MsgSendToEth) GetSender() string { if m != nil { return m.Sender } return "" } func (m *MsgSendToEth) GetEthDest() string { if m != nil { return m.EthDest } return "" } func (m *MsgSendToEth) GetAmount() types.Coin { if m != nil { return m.Amount } return types.Coin{} } func (m *MsgSendToEth) GetBridgeFee() types.Coin { if m != nil { return m.BridgeFee } return types.Coin{} } type MsgSendToEthResponse struct { } func (m *MsgSendToEthResponse) Reset() { *m = MsgSendToEthResponse{} } func (m *MsgSendToEthResponse) String() string { return proto.CompactTextString(m) } func (*MsgSendToEthResponse) ProtoMessage() {} func (*MsgSendToEthResponse) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{5} } func (m *MsgSendToEthResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgSendToEthResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgSendToEthResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgSendToEthResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgSendToEthResponse.Merge(m, src) } func (m *MsgSendToEthResponse) XXX_Size() int { return m.Size() } func (m *MsgSendToEthResponse) XXX_DiscardUnknown() { xxx_messageInfo_MsgSendToEthResponse.DiscardUnknown(m) } var xxx_messageInfo_MsgSendToEthResponse proto.InternalMessageInfo // MsgRequestBatch // this is a message anyone can send that requests a batch of transactions to // send across the bridge be created for whatever block height this message is // included in. This acts as a coordination point; the handler for this message // looks at the AddToOutgoingPool tx's in the store and generates a batch, also // available in the store tied to this message.
The validators then grab this // batch, sign it, submit the signatures with a MsgConfirmBatch before a relayer // can finally submit the batch // ------------- type MsgRequestBatch struct { Orchestrator string `protobuf:"bytes,1,opt,name=orchestrator,proto3" json:"orchestrator,omitempty"` Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` } func (m *MsgRequestBatch) Reset() { *m = MsgRequestBatch{} } func (m *MsgRequestBatch) String() string { return proto.CompactTextString(m) } func (*MsgRequestBatch) ProtoMessage() {} func (*MsgRequestBatch) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{6} } func (m *MsgRequestBatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgRequestBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgRequestBatch.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgRequestBatch) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgRequestBatch.Merge(m, src) } func (m *MsgRequestBatch) XXX_Size() int { return m.Size() } func (m *MsgRequestBatch) XXX_DiscardUnknown() { xxx_messageInfo_MsgRequestBatch.DiscardUnknown(m) } var xxx_messageInfo_MsgRequestBatch proto.InternalMessageInfo func (m *MsgRequestBatch) GetOrchestrator() string { if m != nil { return m.Orchestrator } return "" } func (m *MsgRequestBatch) GetDenom() string { if m != nil { return m.Denom } return "" } type MsgRequestBatchResponse struct { } func (m *MsgRequestBatchResponse) Reset() { *m = MsgRequestBatchResponse{} } func (m *MsgRequestBatchResponse) String() string { return proto.CompactTextString(m) } func (*MsgRequestBatchResponse) ProtoMessage() {} func (*MsgRequestBatchResponse) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{7} } func (m *MsgRequestBatchResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgRequestBatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgRequestBatchResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgRequestBatchResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgRequestBatchResponse.Merge(m, src) } func (m *MsgRequestBatchResponse) XXX_Size() int { return m.Size() } func (m *MsgRequestBatchResponse) XXX_DiscardUnknown() { xxx_messageInfo_MsgRequestBatchResponse.DiscardUnknown(m) } var xxx_messageInfo_MsgRequestBatchResponse proto.InternalMessageInfo // MsgConfirmBatch // When validators observe a MsgRequestBatch they form a batch by ordering // transactions currently in the txqueue in order of highest to lowest fee, // cutting off when the batch either reaches a hardcoded maximum size (to be // decided, probably around 100) or when transactions stop being profitable // (TODO determine this without nondeterminism) This message includes the batch // as well as an Ethereum signature over this batch by the validator // ------------- type MsgConfirmBatch struct { Nonce uint64 `protobuf:"varint,1,opt,name=nonce,proto3" json:"nonce,omitempty"` TokenContract string `protobuf:"bytes,2,opt,name=token_contract,json=tokenContract,proto3" json:"token_contract,omitempty"` EthSigner string `protobuf:"bytes,3,opt,name=eth_signer,json=ethSigner,proto3" json:"eth_signer,omitempty"` Orchestrator string 
`protobuf:"bytes,4,opt,name=orchestrator,proto3" json:"orchestrator,omitempty"` Signature string `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` } func (m *MsgConfirmBatch) Reset() { *m = MsgConfirmBatch{} } func (m *MsgConfirmBatch) String() string { return proto.CompactTextString(m) } func (*MsgConfirmBatch) ProtoMessage() {} func (*MsgConfirmBatch) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{8} } func (m *MsgConfirmBatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgConfirmBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgConfirmBatch.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgConfirmBatch) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgConfirmBatch.Merge(m, src) } func (m *MsgConfirmBatch) XXX_Size() int { return m.Size() } func (m *MsgConfirmBatch) XXX_DiscardUnknown() { xxx_messageInfo_MsgConfirmBatch.DiscardUnknown(m) } var xxx_messageInfo_MsgConfirmBatch proto.InternalMessageInfo func (m *MsgConfirmBatch) GetNonce() uint64 { if m != nil { return m.Nonce } return 0 } func (m *MsgConfirmBatch) GetTokenContract() string { if m != nil { return m.TokenContract } return "" } func (m *MsgConfirmBatch) GetEthSigner() string { if m != nil { return m.EthSigner } return "" } func (m *MsgConfirmBatch) GetOrchestrator() string { if m != nil { return m.Orchestrator } return "" } func (m *MsgConfirmBatch) GetSignature() string { if m != nil { return m.Signature } return "" } type MsgConfirmBatchResponse struct { } func (m *MsgConfirmBatchResponse) Reset() { *m = MsgConfirmBatchResponse{} } func (m *MsgConfirmBatchResponse) String() string { return proto.CompactTextString(m) } func (*MsgConfirmBatchResponse) ProtoMessage() {} func (*MsgConfirmBatchResponse) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{9} } func (m *MsgConfirmBatchResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgConfirmBatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgConfirmBatchResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgConfirmBatchResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgConfirmBatchResponse.Merge(m, src) } func (m *MsgConfirmBatchResponse) XXX_Size() int { return m.Size() } func (m *MsgConfirmBatchResponse) XXX_DiscardUnknown() { xxx_messageInfo_MsgConfirmBatchResponse.DiscardUnknown(m) } var xxx_messageInfo_MsgConfirmBatchResponse proto.InternalMessageInfo // EthereumBridgeDepositClaim // When more than 66% of the active validator set has // claimed to have seen the deposit enter the Ethereum blockchain, coins are // issued to the Cosmos address in question // ------------- type MsgDepositClaim struct { EventNonce uint64 `protobuf:"varint,1,opt,name=event_nonce,json=eventNonce,proto3" json:"event_nonce,omitempty"` BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` TokenContract string `protobuf:"bytes,3,opt,name=token_contract,json=tokenContract,proto3" json:"token_contract,omitempty"` Amount github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,4,opt,name=amount,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"amount"`
EthereumSender string `protobuf:"bytes,5,opt,name=ethereum_sender,json=ethereumSender,proto3" json:"ethereum_sender,omitempty"` CosmosReceiver string `protobuf:"bytes,6,opt,name=cosmos_receiver,json=cosmosReceiver,proto3" json:"cosmos_receiver,omitempty"` Orchestrator string `protobuf:"bytes,7,opt,name=orchestrator,proto3" json:"orchestrator,omitempty"` } func (m *MsgDepositClaim) Reset() { *m = MsgDepositClaim{} } func (m *MsgDepositClaim) String() string { return proto.CompactTextString(m) } func (*MsgDepositClaim) ProtoMessage() {} func (*MsgDepositClaim) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{10} } func (m *MsgDepositClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgDepositClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgDepositClaim.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgDepositClaim) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgDepositClaim.Merge(m, src) } func (m *MsgDepositClaim) XXX_Size() int { return m.Size() } func (m *MsgDepositClaim) XXX_DiscardUnknown() { xxx_messageInfo_MsgDepositClaim.DiscardUnknown(m) } var xxx_messageInfo_MsgDepositClaim proto.InternalMessageInfo func (m *MsgDepositClaim) GetEventNonce() uint64 { if m != nil { return m.EventNonce } return 0 } func (m *MsgDepositClaim) GetBlockHeight() uint64 { if m != nil { return m.BlockHeight } return 0 } func (m *MsgDepositClaim) GetTokenContract() string { if m != nil { return m.TokenContract } return "" } func (m *MsgDepositClaim) GetEthereumSender() string { if m != nil { return m.EthereumSender } return "" } func (m *MsgDepositClaim) GetCosmosReceiver() string { if m != nil { return m.CosmosReceiver } return "" } func (m *MsgDepositClaim) GetOrchestrator() string { if m != nil { return m.Orchestrator } return "" } type MsgDepositClaimResponse struct { } func (m *MsgDepositClaimResponse) Reset() { *m = MsgDepositClaimResponse{} } func (m *MsgDepositClaimResponse) String() string { return proto.CompactTextString(m) } func (*MsgDepositClaimResponse) ProtoMessage() {} func (*MsgDepositClaimResponse) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{11} } func (m *MsgDepositClaimResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgDepositClaimResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgDepositClaimResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgDepositClaimResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgDepositClaimResponse.Merge(m, src) } func (m *MsgDepositClaimResponse) XXX_Size() int { return m.Size() } func (m *MsgDepositClaimResponse) XXX_DiscardUnknown() { xxx_messageInfo_MsgDepositClaimResponse.DiscardUnknown(m) } var xxx_messageInfo_MsgDepositClaimResponse proto.InternalMessageInfo // WithdrawClaim claims that a batch of withdrawal // operations on the bridge contract was executed. 
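// --- Editorial example (not generated by protoc-gen-gogo) ---
// A minimal sketch of how an orchestrator process might populate the
// MsgWithdrawClaim declared just below after observing a batch-execution
// event on Ethereum. All field values are hypothetical placeholders, not
// values taken from this file or from any real deployment.
func exampleWithdrawClaim() *MsgWithdrawClaim {
	return &MsgWithdrawClaim{
		EventNonce:    42,       // hypothetical bridge event nonce
		BlockHeight:   12345678, // Ethereum block in which the event was observed
		BatchNonce:    7,        // nonce of the transaction batch that was executed
		TokenContract: "0x0000000000000000000000000000000000000000", // placeholder ERC20 address
		Orchestrator:  "cosmos1placeholderorchestratoraddress",      // placeholder orchestrator address
	}
}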
type MsgWithdrawClaim struct { EventNonce uint64 `protobuf:"varint,1,opt,name=event_nonce,json=eventNonce,proto3" json:"event_nonce,omitempty"` BlockHeight uint64 `protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` BatchNonce uint64 `protobuf:"varint,3,opt,name=batch_nonce,json=batchNonce,proto3" json:"batch_nonce,omitempty"` TokenContract string `protobuf:"bytes,4,opt,name=token_contract,json=tokenContract,proto3" json:"token_contract,omitempty"` Orchestrator string `protobuf:"bytes,5,opt,name=orchestrator,proto3" json:"orchestrator,omitempty"` } func (m *MsgWithdrawClaim) Reset() { *m = MsgWithdrawClaim{} } func (m *MsgWithdrawClaim) String() string { return proto.CompactTextString(m) } func (*MsgWithdrawClaim) ProtoMessage() {} func (*MsgWithdrawClaim) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{12} } func (m *MsgWithdrawClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgWithdrawClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgWithdrawClaim.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgWithdrawClaim) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgWithdrawClaim.Merge(m, src) } func (m *MsgWithdrawClaim) XXX_Size() int { return m.Size() } func (m *MsgWithdrawClaim) XXX_DiscardUnknown() { xxx_messageInfo_MsgWithdrawClaim.DiscardUnknown(m) } var xxx_messageInfo_MsgWithdrawClaim proto.InternalMessageInfo func (m *MsgWithdrawClaim) GetEventNonce() uint64 { if m != nil { return m.EventNonce } return 0 } func (m *MsgWithdrawClaim) GetBlockHeight() uint64 { if m != nil { return m.BlockHeight } return 0 } func (m *MsgWithdrawClaim) GetBatchNonce() uint64 { if m != nil { return m.BatchNonce } return 0 } func (m *MsgWithdrawClaim) GetTokenContract() string { if m != nil { return m.TokenContract } return "" } func (m *MsgWithdrawClaim) GetOrchestrator() string { if m != nil { return m.Orchestrator } return "" } type MsgWithdrawClaimResponse struct { } func (m *MsgWithdrawClaimResponse) Reset() { *m = MsgWithdrawClaimResponse{} } func (m *MsgWithdrawClaimResponse) String() string { return proto.CompactTextString(m) } func (*MsgWithdrawClaimResponse) ProtoMessage() {} func (*MsgWithdrawClaimResponse) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{13} } func (m *MsgWithdrawClaimResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgWithdrawClaimResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgWithdrawClaimResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgWithdrawClaimResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgWithdrawClaimResponse.Merge(m, src) } func (m *MsgWithdrawClaimResponse) XXX_Size() int { return m.Size() } func (m *MsgWithdrawClaimResponse) XXX_DiscardUnknown() { xxx_messageInfo_MsgWithdrawClaimResponse.DiscardUnknown(m) } var xxx_messageInfo_MsgWithdrawClaimResponse proto.InternalMessageInfo // ERC20DeployedClaim allows the Cosmos module // to learn about an ERC20 that someone deployed to type MsgERC20DeployedClaim struct { EventNonce uint64 `protobuf:"varint,1,opt,name=event_nonce,json=eventNonce,proto3" json:"event_nonce,omitempty"` BlockHeight uint64 
`protobuf:"varint,2,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` CosmosDenom string `protobuf:"bytes,3,opt,name=cosmos_denom,json=cosmosDenom,proto3" json:"cosmos_denom,omitempty"` TokenContract string `protobuf:"bytes,4,opt,name=token_contract,json=tokenContract,proto3" json:"token_contract,omitempty"` Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` Symbol string `protobuf:"bytes,6,opt,name=symbol,proto3" json:"symbol,omitempty"` Decimals uint64 `protobuf:"varint,7,opt,name=decimals,proto3" json:"decimals,omitempty"` Orchestrator string `protobuf:"bytes,8,opt,name=orchestrator,proto3" json:"orchestrator,omitempty"` } func (m *MsgERC20DeployedClaim) Reset() { *m = MsgERC20DeployedClaim{} } func (m *MsgERC20DeployedClaim) String() string { return proto.CompactTextString(m) } func (*MsgERC20DeployedClaim) ProtoMessage() {} func (*MsgERC20DeployedClaim) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{14} } func (m *MsgERC20DeployedClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgERC20DeployedClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgERC20DeployedClaim.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgERC20DeployedClaim) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgERC20DeployedClaim.Merge(m, src) } func (m *MsgERC20DeployedClaim) XXX_Size() int { return m.Size() } func (m *MsgERC20DeployedClaim) XXX_DiscardUnknown() { xxx_messageInfo_MsgERC20DeployedClaim.DiscardUnknown(m) } var xxx_messageInfo_MsgERC20DeployedClaim proto.InternalMessageInfo func (m *MsgERC20DeployedClaim) GetEventNonce() uint64 { if m != nil { return m.EventNonce } return 0 } func (m *MsgERC20DeployedClaim) GetBlockHeight() uint64 { if m != nil { return m.BlockHeight } return 0 } func (m *MsgERC20DeployedClaim) GetCosmosDenom() string { if m != nil { return m.CosmosDenom } return "" } func (m *MsgERC20DeployedClaim) GetTokenContract() string { if m != nil { return m.TokenContract } return "" } func (m *MsgERC20DeployedClaim) GetName() string { if m != nil { return m.Name } return "" } func (m *MsgERC20DeployedClaim) GetSymbol() string { if m != nil { return m.Symbol } return "" } func (m *MsgERC20DeployedClaim) GetDecimals() uint64 { if m != nil { return m.Decimals } return 0 } func (m *MsgERC20DeployedClaim) GetOrchestrator() string { if m != nil { return m.Orchestrator } return "" } type MsgERC20DeployedClaimResponse struct { } func (m *MsgERC20DeployedClaimResponse) Reset() { *m = MsgERC20DeployedClaimResponse{} } func (m *MsgERC20DeployedClaimResponse) String() string { return proto.CompactTextString(m) } func (*MsgERC20DeployedClaimResponse) ProtoMessage() {} func (*MsgERC20DeployedClaimResponse) Descriptor() ([]byte, []int) { return fileDescriptor_75b6627b296db358, []int{15} } func (m *MsgERC20DeployedClaimResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MsgERC20DeployedClaimResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MsgERC20DeployedClaimResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MsgERC20DeployedClaimResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MsgERC20DeployedClaimResponse.Merge(m, src) } func (m 
*MsgERC20DeployedClaimResponse) XXX_Size() int { return m.Size() } func (m *MsgERC20DeployedClaimResponse) XXX_DiscardUnknown() { xxx_messageInfo_MsgERC20DeployedClaimResponse.DiscardUnknown(m) } var xxx_messageInfo_MsgERC20DeployedClaimResponse proto.InternalMessageInfo func init() { proto.RegisterType((*MsgSetOrchestratorAddress)(nil), "peggy.v1.MsgSetOrchestratorAddress") proto.RegisterType((*MsgSetOrchestratorAddressResponse)(nil), "peggy.v1.MsgSetOrchestratorAddressResponse") proto.RegisterType((*MsgValsetConfirm)(nil), "peggy.v1.MsgValsetConfirm") proto.RegisterType((*MsgValsetConfirmResponse)(nil), "peggy.v1.MsgValsetConfirmResponse") proto.RegisterType((*MsgSendToEth)(nil), "peggy.v1.MsgSendToEth") proto.RegisterType((*MsgSendToEthResponse)(nil), "peggy.v1.MsgSendToEthResponse") proto.RegisterType((*MsgRequestBatch)(nil), "peggy.v1.MsgRequestBatch") proto.RegisterType((*MsgRequestBatchResponse)(nil), "peggy.v1.MsgRequestBatchResponse") proto.RegisterType((*MsgConfirmBatch)(nil), "peggy.v1.MsgConfirmBatch") proto.RegisterType((*MsgConfirmBatchResponse)(nil), "peggy.v1.MsgConfirmBatchResponse") proto.RegisterType((*MsgDepositClaim)(nil), "peggy.v1.MsgDepositClaim") proto.RegisterType((*MsgDepositClaimResponse)(nil), "peggy.v1.MsgDepositClaimResponse") proto.RegisterType((*MsgWithdrawClaim)(nil), "peggy.v1.MsgWithdrawClaim") proto.RegisterType((*MsgWithdrawClaimResponse)(nil), "peggy.v1.MsgWithdrawClaimResponse") proto.RegisterType((*MsgERC20DeployedClaim)(nil), "peggy.v1.MsgERC20DeployedClaim") proto.RegisterType((*MsgERC20DeployedClaimResponse)(nil), "peggy.v1.MsgERC20DeployedClaimResponse") } func init() { proto.RegisterFile("peggy/v1/msgs.proto", fileDescriptor_75b6627b296db358) } var fileDescriptor_75b6627b296db358 = []byte{ // 1046 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcf, 0x6f, 0xe3, 0x44, 0x14, 0xae, 0xdb, 0xf4, 0xd7, 0x6b, 0xba, 0x45, 0xa6, 0xdb, 0x26, 0xd6, 0x36, 0x69, 0xbd, 0xb0, 0x45, 0xa0, 0xb5, 0xdb, 0x72, 0xe0, 0x86, 0x44, 0xd3, 0x5d, 0xf1, 0x43, 0x59, 0xa4, 0x14, 0x81, 0xc4, 0xc5, 0x72, 0xec, 0xb7, 0xb6, 0x55, 0x7b, 0x26, 0xf5, 0x4c, 0xb2, 0xf4, 0xc2, 0x81, 0x03, 0x17, 0x2e, 0x20, 0x24, 0xfe, 0x0d, 0xee, 0x1c, 0x38, 0x70, 0xda, 0xe3, 0x4a, 0x5c, 0x10, 0x87, 0x15, 0x6a, 0xf9, 0x43, 0x90, 0x67, 0x26, 0x8e, 0x9d, 0x38, 0xa2, 0x87, 0x9e, 0xec, 0xf9, 0xe6, 0xf9, 0x7d, 0xef, 0x7b, 0xef, 0xcd, 0x1b, 0xc3, 0x9b, 0x03, 0x0c, 0x82, 0x2b, 0x7b, 0x74, 0x6c, 0x27, 0x2c, 0x60, 0xd6, 0x20, 0xa5, 0x9c, 0xea, 0x6b, 0x02, 0xb4, 0x46, 0xc7, 0x46, 0xcb, 0xa3, 0x2c, 0xa1, 0xcc, 0xee, 0xbb, 0x0c, 0xed, 0xd1, 0x71, 0x1f, 0xb9, 0x7b, 0x6c, 0x7b, 0x34, 0x22, 0xd2, 0xd2, 0xd8, 0x0e, 0x68, 0x40, 0xc5, 0xab, 0x9d, 0xbd, 0x29, 0xf4, 0x41, 0x40, 0x69, 0x10, 0xa3, 0xed, 0x0e, 0x22, 0xdb, 0x25, 0x84, 0x72, 0x97, 0x47, 0x94, 0x28, 0xef, 0xe6, 0xb7, 0xd0, 0xec, 0xb2, 0xe0, 0x1c, 0xf9, 0xe7, 0xa9, 0x17, 0x22, 0xe3, 0xa9, 0xcb, 0x69, 0xfa, 0x91, 0xef, 0xa7, 0xc8, 0x98, 0xfe, 0x00, 0xd6, 0x47, 0x6e, 0x1c, 0xf9, 0x19, 0xd6, 0xd0, 0xf6, 0xb5, 0x77, 0xd6, 0x7b, 0x13, 0x40, 0x37, 0xa1, 0x4e, 0x0b, 0x1f, 0x35, 0x16, 0x85, 0x41, 0x09, 0xd3, 0xdb, 0xb0, 0x81, 0x3c, 0x74, 0x5c, 0xe9, 0xb0, 0xb1, 0x24, 0x4c, 0x00, 0x79, 0xa8, 0x28, 0xcc, 0x87, 0x70, 0x30, 0x97, 0xbf, 0x87, 0x6c, 0x40, 0x09, 0x43, 0xf3, 0x07, 0x0d, 0xde, 0xe8, 0xb2, 0xe0, 0x4b, 0x37, 0x66, 0xc8, 0x3b, 0x94, 0x3c, 0x8f, 0xd2, 0x44, 0xdf, 0x86, 0x65, 0x42, 0x89, 0x87, 0x22, 0xb0, 0x5a, 0x4f, 0x2e, 0xee, 0x24, 0xa8, 0x4c, 0x37, 0x8b, 0x02, 0xe2, 0xf2, 0x61, 0x8a, 0x8d, 0x9a, 
0xd4, 0x9d, 0x03, 0xa6, 0x01, 0x8d, 0xe9, 0x60, 0xf2, 0x48, 0x7f, 0xd3, 0xa0, 0x2e, 0xf4, 0x10, 0xff, 0x0b, 0xfa, 0x84, 0x87, 0xfa, 0x0e, 0xac, 0x30, 0x24, 0x3e, 0x8e, 0xf3, 0xa7, 0x56, 0x7a, 0x13, 0xd6, 0xb2, 0x18, 0x7c, 0x64, 0x5c, 0xc5, 0xb8, 0x8a, 0x3c, 0x3c, 0x43, 0xc6, 0xf5, 0x0f, 0x60, 0xc5, 0x4d, 0xe8, 0x90, 0x70, 0x11, 0xd9, 0xc6, 0x49, 0xd3, 0x92, 0x75, 0xb7, 0xb2, 0xba, 0x5b, 0xaa, 0xee, 0x56, 0x87, 0x46, 0xe4, 0xb4, 0xf6, 0xf2, 0x75, 0x7b, 0xa1, 0xa7, 0xcc, 0xf5, 0x0f, 0x01, 0xfa, 0x69, 0xe4, 0x07, 0xe8, 0x3c, 0x47, 0x19, 0xf7, 0x2d, 0x3e, 0x5e, 0x97, 0x9f, 0x3c, 0x45, 0x34, 0x77, 0x60, 0xbb, 0x18, 0x7b, 0x2e, 0xea, 0x33, 0xd8, 0xea, 0xb2, 0xa0, 0x87, 0x97, 0x43, 0x64, 0xfc, 0xd4, 0xe5, 0x5e, 0x38, 0x93, 0x66, 0xad, 0x22, 0xcd, 0xdb, 0xb0, 0xec, 0x23, 0xa1, 0x89, 0xd2, 0x27, 0x17, 0x66, 0x13, 0x76, 0xa7, 0x9c, 0xe5, 0x3c, 0xbf, 0x6a, 0x82, 0x48, 0xe5, 0x54, 0x12, 0x55, 0x57, 0xf9, 0x6d, 0xb8, 0xc7, 0xe9, 0x05, 0x12, 0xc7, 0xa3, 0x84, 0xa7, 0xae, 0x37, 0xce, 0xe1, 0xa6, 0x40, 0x3b, 0x0a, 0xd4, 0xf7, 0x20, 0xab, 0xaa, 0x93, 0x95, 0x0e, 0x53, 0x55, 0xe7, 0x75, 0xe4, 0xe1, 0xb9, 0x00, 0x66, 0x44, 0xd4, 0x2a, 0x44, 0x94, 0x5a, 0x61, 0x79, 0xba, 0x15, 0xa4, 0x98, 0x62, 0xc0, 0x93, 0x4e, 0x58, 0x14, 0x62, 0xce, 0x70, 0x40, 0x59, 0xc4, 0x3b, 0xb1, 0x1b, 0x25, 0xa2, 0xf1, 0x46, 0x48, 0xb8, 0x53, 0x94, 0x04, 0x02, 0x7a, 0x26, 0x74, 0x1d, 0x40, 0xbd, 0x1f, 0x53, 0xef, 0xc2, 0x09, 0x31, 0x0a, 0x42, 0xa9, 0xaa, 0xd6, 0xdb, 0x10, 0xd8, 0xc7, 0x02, 0xaa, 0x90, 0xbe, 0x54, 0x25, 0xfd, 0x69, 0xde, 0x44, 0x42, 0xd5, 0xa9, 0x95, 0x15, 0xfb, 0xef, 0xd7, 0xed, 0x47, 0x41, 0xc4, 0xc3, 0x61, 0xdf, 0xf2, 0x68, 0x62, 0xab, 0x71, 0x22, 0x1f, 0x8f, 0x99, 0x7f, 0x61, 0xf3, 0xab, 0x01, 0x32, 0xeb, 0x13, 0xc2, 0xf3, 0x9e, 0x3a, 0x84, 0x2d, 0xe4, 0x21, 0xa6, 0x38, 0x4c, 0x1c, 0xd5, 0xc8, 0x32, 0x0b, 0xf7, 0xc6, 0xf0, 0xb9, 0x6c, 0xe8, 0x43, 0xd8, 0x92, 0x8e, 0x9c, 0x14, 0x3d, 0x8c, 0x46, 0x98, 0x36, 0x56, 0xa4, 0xa1, 0x84, 0x7b, 0x0a, 0x9d, 0xc9, 0xfa, 0xea, 0x6c, 0xd6, 0x55, 0x5e, 0x8b, 0xb9, 0xcb, 0xf3, 0xfa, 0x87, 0x9c, 0x05, 0x5f, 0x45, 0x3c, 0xf4, 0x53, 0xf7, 0xc5, 0xdd, 0x25, 0xb6, 0x0d, 0x1b, 0xfd, 0xac, 0x82, 0xca, 0xc7, 0x92, 0xf4, 0x21, 0xa0, 0x67, 0x73, 0x9a, 0xae, 0x56, 0x95, 0xf9, 0x69, 0x7d, 0xcb, 0x15, 0xfa, 0xe4, 0x08, 0x29, 0x69, 0xc8, 0x05, 0xfe, 0xb4, 0x08, 0xf7, 0xbb, 0x2c, 0x78, 0xd2, 0xeb, 0x9c, 0x1c, 0x9d, 0xe1, 0x20, 0xa6, 0x57, 0xe8, 0xdf, 0x9d, 0xca, 0x03, 0xa8, 0xab, 0x32, 0xc9, 0xb3, 0x29, 0x9b, 0x67, 0x43, 0x62, 0x67, 0x19, 0x74, 0x5b, 0x9d, 0x3a, 0xd4, 0x88, 0x9b, 0x8c, 0x0f, 0x85, 0x78, 0x17, 0xd3, 0xee, 0x2a, 0xe9, 0xd3, 0x58, 0xd5, 0x5e, 0xad, 0x74, 0x03, 0xd6, 0x7c, 0xf4, 0xa2, 0xc4, 0x8d, 0x99, 0xa8, 0x77, 0xad, 0x97, 0xaf, 0x67, 0xf2, 0xb5, 0x56, 0x91, 0xaf, 0x36, 0xec, 0x55, 0xa6, 0x64, 0x9c, 0xb4, 0x93, 0xdf, 0x57, 0x61, 0xa9, 0xcb, 0x02, 0xfd, 0x12, 0x36, 0xcb, 0xb7, 0x84, 0x61, 0x8d, 0xaf, 0x4f, 0x6b, 0x7a, 0x68, 0x1b, 0xe6, 0xfc, 0xbd, 0xbc, 0x1a, 0xfb, 0xdf, 0xfd, 0xf9, 0xef, 0xcf, 0x8b, 0x86, 0xd9, 0xb0, 0xf3, 0xbb, 0x79, 0x24, 0x0c, 0xb3, 0xec, 0x08, 0x86, 0x3e, 0xac, 0x17, 0xc6, 0x7d, 0xc9, 0x65, 0x8e, 0x1b, 0xad, 0x6a, 0x3c, 0xa7, 0xd9, 0x13, 0x34, 0xbb, 0xe6, 0xfd, 0x09, 0x4d, 0x76, 0xda, 0x1c, 0x4e, 0x1d, 0xe4, 0xa1, 0x9e, 0x40, 0xbd, 0x34, 0x7e, 0x9b, 0x25, 0x77, 0xc5, 0x2d, 0xe3, 0x60, 0xee, 0x56, 0x4e, 0xd6, 0x16, 0x64, 0x4d, 0x73, 0x77, 0x42, 0x96, 0x4a, 0x3b, 0x47, 0xb4, 0x7b, 0x46, 0x57, 0x1a, 0xc2, 0x65, 0xba, 0xe2, 0xd6, 0x14, 0x5d, 0xe5, 0x24, 0xac, 0xa0, 0x53, 0xb9, 0x9b, 0xd0, 0x95, 0xc6, 0x64, 0x99, 0xae, 0xb8, 0x35, 0x45, 0x57, 0x39, 0x20, 0x2a, 
0xe8, 0x7c, 0x69, 0xe7, 0x78, 0xc2, 0xfd, 0x25, 0x6c, 0x96, 0xa7, 0x47, 0xb9, 0x47, 0x4a, 0x7b, 0x53, 0x3d, 0x52, 0x7d, 0x62, 0x2b, 0x7a, 0xe4, 0x85, 0x32, 0x54, 0x94, 0xdf, 0x6b, 0xa0, 0x57, 0x1d, 0xe8, 0x92, 0xf3, 0x59, 0x03, 0xe3, 0xf0, 0x7f, 0x0c, 0xf2, 0x10, 0x1e, 0x89, 0x10, 0xf6, 0xcd, 0xd6, 0x24, 0x04, 0x4c, 0xbd, 0x93, 0x23, 0xc7, 0x57, 0xe6, 0x2a, 0x90, 0x5f, 0x34, 0xd8, 0x99, 0xf3, 0xb3, 0xf7, 0x70, 0xaa, 0x45, 0xab, 0x8c, 0x8c, 0xf7, 0x6e, 0x61, 0x94, 0x07, 0xf5, 0xae, 0x08, 0xea, 0x2d, 0xd3, 0x2c, 0x36, 0x35, 0x77, 0x8a, 0x27, 0x7b, 0xfc, 0x13, 0x76, 0xfa, 0xe9, 0xcb, 0xeb, 0x96, 0xf6, 0xea, 0xba, 0xa5, 0xfd, 0x73, 0xdd, 0xd2, 0x7e, 0xbc, 0x69, 0x2d, 0xbc, 0xba, 0x69, 0x2d, 0xfc, 0x75, 0xd3, 0x5a, 0xf8, 0xfa, 0xa8, 0x70, 0x63, 0xb9, 0x31, 0x0f, 0xd1, 0x7d, 0x4c, 0x90, 0x2b, 0x97, 0x09, 0xf5, 0x87, 0x31, 0xda, 0xdf, 0xa8, 0xa5, 0xb8, 0xbf, 0xfa, 0x2b, 0xe2, 0xd7, 0xf6, 0xfd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x32, 0xee, 0x60, 0x4c, 0x4f, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // MsgClient is the client API for Msg service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type MsgClient interface { ValsetConfirm(ctx context.Context, in *MsgValsetConfirm, opts ...grpc.CallOption) (*MsgValsetConfirmResponse, error) SendToEth(ctx context.Context, in *MsgSendToEth, opts ...grpc.CallOption) (*MsgSendToEthResponse, error) RequestBatch(ctx context.Context, in *MsgRequestBatch, opts ...grpc.CallOption) (*MsgRequestBatchResponse, error) ConfirmBatch(ctx context.Context, in *MsgConfirmBatch, opts ...grpc.CallOption) (*MsgConfirmBatchResponse, error) DepositClaim(ctx context.Context, in *MsgDepositClaim, opts ...grpc.CallOption) (*MsgDepositClaimResponse, error) WithdrawClaim(ctx context.Context, in *MsgWithdrawClaim, opts ...grpc.CallOption) (*MsgWithdrawClaimResponse, error) ERC20DeployedClaim(ctx context.Context, in *MsgERC20DeployedClaim, opts ...grpc.CallOption) (*MsgERC20DeployedClaimResponse, error) SetOrchestratorAddress(ctx context.Context, in *MsgSetOrchestratorAddress, opts ...grpc.CallOption) (*MsgSetOrchestratorAddressResponse, error) } type msgClient struct { cc grpc1.ClientConn } func NewMsgClient(cc grpc1.ClientConn) MsgClient { return &msgClient{cc} } func (c *msgClient) ValsetConfirm(ctx context.Context, in *MsgValsetConfirm, opts ...grpc.CallOption) (*MsgValsetConfirmResponse, error) { out := new(MsgValsetConfirmResponse) err := c.cc.Invoke(ctx, "/peggy.v1.Msg/ValsetConfirm", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *msgClient) SendToEth(ctx context.Context, in *MsgSendToEth, opts ...grpc.CallOption) (*MsgSendToEthResponse, error) { out := new(MsgSendToEthResponse) err := c.cc.Invoke(ctx, "/peggy.v1.Msg/SendToEth", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *msgClient) RequestBatch(ctx context.Context, in *MsgRequestBatch, opts ...grpc.CallOption) (*MsgRequestBatchResponse, error) { out := new(MsgRequestBatchResponse) err := c.cc.Invoke(ctx, "/peggy.v1.Msg/RequestBatch", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *msgClient) ConfirmBatch(ctx context.Context, in *MsgConfirmBatch, opts ...grpc.CallOption) (*MsgConfirmBatchResponse, error) { out := new(MsgConfirmBatchResponse) err := c.cc.Invoke(ctx, "/peggy.v1.Msg/ConfirmBatch", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *msgClient) DepositClaim(ctx context.Context, in *MsgDepositClaim, opts ...grpc.CallOption) (*MsgDepositClaimResponse, error) { out := new(MsgDepositClaimResponse) err := c.cc.Invoke(ctx, "/peggy.v1.Msg/DepositClaim", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *msgClient) WithdrawClaim(ctx context.Context, in *MsgWithdrawClaim, opts ...grpc.CallOption) (*MsgWithdrawClaimResponse, error) { out := new(MsgWithdrawClaimResponse) err := c.cc.Invoke(ctx, "/peggy.v1.Msg/WithdrawClaim", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *msgClient) ERC20DeployedClaim(ctx context.Context, in *MsgERC20DeployedClaim, opts ...grpc.CallOption) (*MsgERC20DeployedClaimResponse, error) { out := new(MsgERC20DeployedClaimResponse) err := c.cc.Invoke(ctx, "/peggy.v1.Msg/ERC20DeployedClaim", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *msgClient) SetOrchestratorAddress(ctx context.Context, in *MsgSetOrchestratorAddress, opts ...grpc.CallOption) (*MsgSetOrchestratorAddressResponse, error) { out := new(MsgSetOrchestratorAddressResponse) err := c.cc.Invoke(ctx, "/peggy.v1.Msg/SetOrchestratorAddress", in, out, opts...) if err != nil { return nil, err } return out, nil } // MsgServer is the server API for Msg service. type MsgServer interface { ValsetConfirm(context.Context, *MsgValsetConfirm) (*MsgValsetConfirmResponse, error) SendToEth(context.Context, *MsgSendToEth) (*MsgSendToEthResponse, error) RequestBatch(context.Context, *MsgRequestBatch) (*MsgRequestBatchResponse, error) ConfirmBatch(context.Context, *MsgConfirmBatch) (*MsgConfirmBatchResponse, error) DepositClaim(context.Context, *MsgDepositClaim) (*MsgDepositClaimResponse, error) WithdrawClaim(context.Context, *MsgWithdrawClaim) (*MsgWithdrawClaimResponse, error) ERC20DeployedClaim(context.Context, *MsgERC20DeployedClaim) (*MsgERC20DeployedClaimResponse, error) SetOrchestratorAddress(context.Context, *MsgSetOrchestratorAddress) (*MsgSetOrchestratorAddressResponse, error) } // UnimplementedMsgServer can be embedded to have forward compatible implementations. 
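// --- Editorial example (not generated by protoc-gen-gogo) ---
// A minimal sketch showing the intended use of UnimplementedMsgServer,
// declared just below: embed it so a partial MsgServer implementation keeps
// compiling when new RPCs are added to the Msg service. The name
// partialMsgServer and the trivial RequestBatch body are hypothetical.
type partialMsgServer struct {
	UnimplementedMsgServer
}

// RequestBatch is the only RPC this sketch overrides; calls to every other
// Msg RPC fall through to the embedded UnimplementedMsgServer, which returns
// codes.Unimplemented.
func (s *partialMsgServer) RequestBatch(ctx context.Context, req *MsgRequestBatch) (*MsgRequestBatchResponse, error) {
	return &MsgRequestBatchResponse{}, nil
}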
type UnimplementedMsgServer struct { } func (*UnimplementedMsgServer) ValsetConfirm(ctx context.Context, req *MsgValsetConfirm) (*MsgValsetConfirmResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ValsetConfirm not implemented") } func (*UnimplementedMsgServer) SendToEth(ctx context.Context, req *MsgSendToEth) (*MsgSendToEthResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SendToEth not implemented") } func (*UnimplementedMsgServer) RequestBatch(ctx context.Context, req *MsgRequestBatch) (*MsgRequestBatchResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RequestBatch not implemented") } func (*UnimplementedMsgServer) ConfirmBatch(ctx context.Context, req *MsgConfirmBatch) (*MsgConfirmBatchResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ConfirmBatch not implemented") } func (*UnimplementedMsgServer) DepositClaim(ctx context.Context, req *MsgDepositClaim) (*MsgDepositClaimResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DepositClaim not implemented") } func (*UnimplementedMsgServer) WithdrawClaim(ctx context.Context, req *MsgWithdrawClaim) (*MsgWithdrawClaimResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method WithdrawClaim not implemented") } func (*UnimplementedMsgServer) ERC20DeployedClaim(ctx context.Context, req *MsgERC20DeployedClaim) (*MsgERC20DeployedClaimResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ERC20DeployedClaim not implemented") } func (*UnimplementedMsgServer) SetOrchestratorAddress(ctx context.Context, req *MsgSetOrchestratorAddress) (*MsgSetOrchestratorAddressResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SetOrchestratorAddress not implemented") } func RegisterMsgServer(s grpc1.Server, srv MsgServer) { s.RegisterService(&_Msg_serviceDesc, srv) } func _Msg_ValsetConfirm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgValsetConfirm) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MsgServer).ValsetConfirm(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/peggy.v1.Msg/ValsetConfirm", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MsgServer).ValsetConfirm(ctx, req.(*MsgValsetConfirm)) } return interceptor(ctx, in, info, handler) } func _Msg_SendToEth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgSendToEth) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MsgServer).SendToEth(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/peggy.v1.Msg/SendToEth", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MsgServer).SendToEth(ctx, req.(*MsgSendToEth)) } return interceptor(ctx, in, info, handler) } func _Msg_RequestBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgRequestBatch) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MsgServer).RequestBatch(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/peggy.v1.Msg/RequestBatch", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return 
srv.(MsgServer).RequestBatch(ctx, req.(*MsgRequestBatch)) } return interceptor(ctx, in, info, handler) } func _Msg_ConfirmBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgConfirmBatch) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MsgServer).ConfirmBatch(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/peggy.v1.Msg/ConfirmBatch", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MsgServer).ConfirmBatch(ctx, req.(*MsgConfirmBatch)) } return interceptor(ctx, in, info, handler) } func _Msg_DepositClaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgDepositClaim) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MsgServer).DepositClaim(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/peggy.v1.Msg/DepositClaim", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MsgServer).DepositClaim(ctx, req.(*MsgDepositClaim)) } return interceptor(ctx, in, info, handler) } func _Msg_WithdrawClaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgWithdrawClaim) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MsgServer).WithdrawClaim(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/peggy.v1.Msg/WithdrawClaim", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MsgServer).WithdrawClaim(ctx, req.(*MsgWithdrawClaim)) } return interceptor(ctx, in, info, handler) } func _Msg_ERC20DeployedClaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgERC20DeployedClaim) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MsgServer).ERC20DeployedClaim(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/peggy.v1.Msg/ERC20DeployedClaim", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MsgServer).ERC20DeployedClaim(ctx, req.(*MsgERC20DeployedClaim)) } return interceptor(ctx, in, info, handler) } func _Msg_SetOrchestratorAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgSetOrchestratorAddress) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MsgServer).SetOrchestratorAddress(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/peggy.v1.Msg/SetOrchestratorAddress", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MsgServer).SetOrchestratorAddress(ctx, req.(*MsgSetOrchestratorAddress)) } return interceptor(ctx, in, info, handler) } var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "peggy.v1.Msg", HandlerType: (*MsgServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "ValsetConfirm", Handler: _Msg_ValsetConfirm_Handler, }, { MethodName: "SendToEth", Handler: _Msg_SendToEth_Handler, }, { MethodName: "RequestBatch", Handler: _Msg_RequestBatch_Handler, }, { MethodName: "ConfirmBatch", Handler: _Msg_ConfirmBatch_Handler, }, { 
MethodName: "DepositClaim", Handler: _Msg_DepositClaim_Handler, }, { MethodName: "WithdrawClaim", Handler: _Msg_WithdrawClaim_Handler, }, { MethodName: "ERC20DeployedClaim", Handler: _Msg_ERC20DeployedClaim_Handler, }, { MethodName: "SetOrchestratorAddress", Handler: _Msg_SetOrchestratorAddress_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "peggy/v1/msgs.proto", } func (m *MsgSetOrchestratorAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgSetOrchestratorAddress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgSetOrchestratorAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.EthAddress) > 0 { i -= len(m.EthAddress) copy(dAtA[i:], m.EthAddress) i = encodeVarintMsgs(dAtA, i, uint64(len(m.EthAddress))) i-- dAtA[i] = 0x1a } if len(m.Orchestrator) > 0 { i -= len(m.Orchestrator) copy(dAtA[i:], m.Orchestrator) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Orchestrator))) i-- dAtA[i] = 0x12 } if len(m.Validator) > 0 { i -= len(m.Validator) copy(dAtA[i:], m.Validator) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Validator))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *MsgSetOrchestratorAddressResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgSetOrchestratorAddressResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgSetOrchestratorAddressResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l return len(dAtA) - i, nil } func (m *MsgValsetConfirm) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgValsetConfirm) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgValsetConfirm) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Signature) > 0 { i -= len(m.Signature) copy(dAtA[i:], m.Signature) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Signature))) i-- dAtA[i] = 0x22 } if len(m.EthAddress) > 0 { i -= len(m.EthAddress) copy(dAtA[i:], m.EthAddress) i = encodeVarintMsgs(dAtA, i, uint64(len(m.EthAddress))) i-- dAtA[i] = 0x1a } if len(m.Orchestrator) > 0 { i -= len(m.Orchestrator) copy(dAtA[i:], m.Orchestrator) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Orchestrator))) i-- dAtA[i] = 0x12 } if m.Nonce != 0 { i = encodeVarintMsgs(dAtA, i, uint64(m.Nonce)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *MsgValsetConfirmResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgValsetConfirmResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgValsetConfirmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l return len(dAtA) - i, nil } func (m *MsgSendToEth) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgSendToEth) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgSendToEth) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { size, err := m.BridgeFee.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintMsgs(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 { size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintMsgs(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a if len(m.EthDest) > 0 { i -= len(m.EthDest) copy(dAtA[i:], m.EthDest) i = encodeVarintMsgs(dAtA, i, uint64(len(m.EthDest))) i-- dAtA[i] = 0x12 } if len(m.Sender) > 0 { i -= len(m.Sender) copy(dAtA[i:], m.Sender) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Sender))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *MsgSendToEthResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgSendToEthResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgSendToEthResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l return len(dAtA) - i, nil } func (m *MsgRequestBatch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgRequestBatch) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgRequestBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Denom) > 0 { i -= len(m.Denom) copy(dAtA[i:], m.Denom) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Denom))) i-- dAtA[i] = 0x12 } if len(m.Orchestrator) > 0 { i -= len(m.Orchestrator) copy(dAtA[i:], m.Orchestrator) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Orchestrator))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *MsgRequestBatchResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgRequestBatchResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgRequestBatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l return len(dAtA) - i, nil } func (m *MsgConfirmBatch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgConfirmBatch) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgConfirmBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Signature) > 0 { i -= len(m.Signature) copy(dAtA[i:], m.Signature) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Signature))) i-- dAtA[i] = 0x2a } if len(m.Orchestrator) > 0 { i -= len(m.Orchestrator) copy(dAtA[i:], m.Orchestrator) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Orchestrator))) i-- dAtA[i] = 0x22 } if len(m.EthSigner) > 0 { i -= len(m.EthSigner) 
copy(dAtA[i:], m.EthSigner) i = encodeVarintMsgs(dAtA, i, uint64(len(m.EthSigner))) i-- dAtA[i] = 0x1a } if len(m.TokenContract) > 0 { i -= len(m.TokenContract) copy(dAtA[i:], m.TokenContract) i = encodeVarintMsgs(dAtA, i, uint64(len(m.TokenContract))) i-- dAtA[i] = 0x12 } if m.Nonce != 0 { i = encodeVarintMsgs(dAtA, i, uint64(m.Nonce)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *MsgConfirmBatchResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgConfirmBatchResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgConfirmBatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l return len(dAtA) - i, nil } func (m *MsgDepositClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgDepositClaim) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgDepositClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Orchestrator) > 0 { i -= len(m.Orchestrator) copy(dAtA[i:], m.Orchestrator) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Orchestrator))) i-- dAtA[i] = 0x3a } if len(m.CosmosReceiver) > 0 { i -= len(m.CosmosReceiver) copy(dAtA[i:], m.CosmosReceiver) i = encodeVarintMsgs(dAtA, i, uint64(len(m.CosmosReceiver))) i-- dAtA[i] = 0x32 } if len(m.EthereumSender) > 0 { i -= len(m.EthereumSender) copy(dAtA[i:], m.EthereumSender) i = encodeVarintMsgs(dAtA, i, uint64(len(m.EthereumSender))) i-- dAtA[i] = 0x2a } { size := m.Amount.Size() i -= size if _, err := m.Amount.MarshalTo(dAtA[i:]); err != nil { return 0, err } i = encodeVarintMsgs(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 if len(m.TokenContract) > 0 { i -= len(m.TokenContract) copy(dAtA[i:], m.TokenContract) i = encodeVarintMsgs(dAtA, i, uint64(len(m.TokenContract))) i-- dAtA[i] = 0x1a } if m.BlockHeight != 0 { i = encodeVarintMsgs(dAtA, i, uint64(m.BlockHeight)) i-- dAtA[i] = 0x10 } if m.EventNonce != 0 { i = encodeVarintMsgs(dAtA, i, uint64(m.EventNonce)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *MsgDepositClaimResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgDepositClaimResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgDepositClaimResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l return len(dAtA) - i, nil } func (m *MsgWithdrawClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgWithdrawClaim) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgWithdrawClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Orchestrator) > 0 { i -= len(m.Orchestrator) copy(dAtA[i:], m.Orchestrator) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Orchestrator))) i-- dAtA[i] = 0x2a } if len(m.TokenContract) > 0 { i 
-= len(m.TokenContract) copy(dAtA[i:], m.TokenContract) i = encodeVarintMsgs(dAtA, i, uint64(len(m.TokenContract))) i-- dAtA[i] = 0x22 } if m.BatchNonce != 0 { i = encodeVarintMsgs(dAtA, i, uint64(m.BatchNonce)) i-- dAtA[i] = 0x18 } if m.BlockHeight != 0 { i = encodeVarintMsgs(dAtA, i, uint64(m.BlockHeight)) i-- dAtA[i] = 0x10 } if m.EventNonce != 0 { i = encodeVarintMsgs(dAtA, i, uint64(m.EventNonce)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *MsgWithdrawClaimResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgWithdrawClaimResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgWithdrawClaimResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l return len(dAtA) - i, nil } func (m *MsgERC20DeployedClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgERC20DeployedClaim) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgERC20DeployedClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Orchestrator) > 0 { i -= len(m.Orchestrator) copy(dAtA[i:], m.Orchestrator) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Orchestrator))) i-- dAtA[i] = 0x42 } if m.Decimals != 0 { i = encodeVarintMsgs(dAtA, i, uint64(m.Decimals)) i-- dAtA[i] = 0x38 } if len(m.Symbol) > 0 { i -= len(m.Symbol) copy(dAtA[i:], m.Symbol) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Symbol))) i-- dAtA[i] = 0x32 } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintMsgs(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0x2a } if len(m.TokenContract) > 0 { i -= len(m.TokenContract) copy(dAtA[i:], m.TokenContract) i = encodeVarintMsgs(dAtA, i, uint64(len(m.TokenContract))) i-- dAtA[i] = 0x22 } if len(m.CosmosDenom) > 0 { i -= len(m.CosmosDenom) copy(dAtA[i:], m.CosmosDenom) i = encodeVarintMsgs(dAtA, i, uint64(len(m.CosmosDenom))) i-- dAtA[i] = 0x1a } if m.BlockHeight != 0 { i = encodeVarintMsgs(dAtA, i, uint64(m.BlockHeight)) i-- dAtA[i] = 0x10 } if m.EventNonce != 0 { i = encodeVarintMsgs(dAtA, i, uint64(m.EventNonce)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *MsgERC20DeployedClaimResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MsgERC20DeployedClaimResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MsgERC20DeployedClaimResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l return len(dAtA) - i, nil } func
(dAtA []byte, offset int, v uint64) int { offset -= sovMsgs(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *MsgSetOrchestratorAddress) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Validator) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.Orchestrator) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.EthAddress) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } return n } func (m *MsgSetOrchestratorAddressResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l return n } func (m *MsgValsetConfirm) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Nonce != 0 { n += 1 + sovMsgs(uint64(m.Nonce)) } l = len(m.Orchestrator) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.EthAddress) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.Signature) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } return n } func (m *MsgValsetConfirmResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l return n } func (m *MsgSendToEth) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Sender) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.EthDest) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = m.Amount.Size() n += 1 + l + sovMsgs(uint64(l)) l = m.BridgeFee.Size() n += 1 + l + sovMsgs(uint64(l)) return n } func (m *MsgSendToEthResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l return n } func (m *MsgRequestBatch) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Orchestrator) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.Denom) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } return n } func (m *MsgRequestBatchResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l return n } func (m *MsgConfirmBatch) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Nonce != 0 { n += 1 + sovMsgs(uint64(m.Nonce)) } l = len(m.TokenContract) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.EthSigner) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.Orchestrator) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.Signature) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } return n } func (m *MsgConfirmBatchResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l return n } func (m *MsgDepositClaim) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.EventNonce != 0 { n += 1 + sovMsgs(uint64(m.EventNonce)) } if m.BlockHeight != 0 { n += 1 + sovMsgs(uint64(m.BlockHeight)) } l = len(m.TokenContract) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = m.Amount.Size() n += 1 + l + sovMsgs(uint64(l)) l = len(m.EthereumSender) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.CosmosReceiver) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.Orchestrator) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } return n } func (m *MsgDepositClaimResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l return n } func (m *MsgWithdrawClaim) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.EventNonce != 0 { n += 1 + sovMsgs(uint64(m.EventNonce)) } if m.BlockHeight != 0 { n += 1 + sovMsgs(uint64(m.BlockHeight)) } if m.BatchNonce != 0 { n += 1 + sovMsgs(uint64(m.BatchNonce)) } l = len(m.TokenContract) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.Orchestrator) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } return n } func (m *MsgWithdrawClaimResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l return n } func (m 
*MsgERC20DeployedClaim) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.EventNonce != 0 { n += 1 + sovMsgs(uint64(m.EventNonce)) } if m.BlockHeight != 0 { n += 1 + sovMsgs(uint64(m.BlockHeight)) } l = len(m.CosmosDenom) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.TokenContract) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.Name) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } l = len(m.Symbol) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } if m.Decimals != 0 { n += 1 + sovMsgs(uint64(m.Decimals)) } l = len(m.Orchestrator) if l > 0 { n += 1 + l + sovMsgs(uint64(l)) } return n } func (m *MsgERC20DeployedClaimResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l return n } func sovMsgs(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozMsgs(x uint64) (n int) { return sovMsgs(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *MsgSetOrchestratorAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgSetOrchestratorAddress: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgSetOrchestratorAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Validator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Orchestrator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Orchestrator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EthAddress", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.EthAddress = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if 
(iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgSetOrchestratorAddressResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgSetOrchestratorAddressResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgSetOrchestratorAddressResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgValsetConfirm) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgValsetConfirm: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgValsetConfirm: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Nonce", wireType) } m.Nonce = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Nonce |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Orchestrator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Orchestrator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EthAddress", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.EthAddress = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 
{ if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Signature = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgValsetConfirmResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgValsetConfirmResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgValsetConfirmResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgSendToEth) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgSendToEth: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgSendToEth: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Sender = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EthDest", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.EthDest 
= string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BridgeFee", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.BridgeFee.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgSendToEthResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgSendToEthResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgSendToEthResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgRequestBatch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgRequestBatch: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgRequestBatch: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Orchestrator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if 
intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Orchestrator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Denom = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgRequestBatchResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgRequestBatchResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgRequestBatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgConfirmBatch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgConfirmBatch: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgConfirmBatch: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Nonce", wireType) } m.Nonce = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Nonce |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TokenContract", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := 
iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.TokenContract = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EthSigner", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.EthSigner = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Orchestrator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Orchestrator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Signature = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgConfirmBatchResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgConfirmBatchResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgConfirmBatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgDepositClaim) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgDepositClaim: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgDepositClaim: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field EventNonce", wireType) } m.EventNonce = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.EventNonce |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) } m.BlockHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.BlockHeight |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TokenContract", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.TokenContract = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EthereumSender", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.EthereumSender = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CosmosReceiver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { 
return io.ErrUnexpectedEOF } m.CosmosReceiver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Orchestrator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Orchestrator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgDepositClaimResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgDepositClaimResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgDepositClaimResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgWithdrawClaim) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgWithdrawClaim: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgWithdrawClaim: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field EventNonce", wireType) } m.EventNonce = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.EventNonce |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) } m.BlockHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.BlockHeight |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field BatchNonce", wireType) } m.BatchNonce = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx 
>= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.BatchNonce |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TokenContract", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.TokenContract = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Orchestrator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Orchestrator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgWithdrawClaimResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgWithdrawClaimResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgWithdrawClaimResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgERC20DeployedClaim) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgERC20DeployedClaim: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgERC20DeployedClaim: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field EventNonce", wireType) } m.EventNonce = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { 
return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.EventNonce |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) } m.BlockHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.BlockHeight |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CosmosDenom", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.CosmosDenom = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TokenContract", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.TokenContract = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Symbol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Symbol = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Decimals", wireType) } m.Decimals = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Decimals |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Orchestrator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen 
|= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthMsgs } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthMsgs } if postIndex > l { return io.ErrUnexpectedEOF } m.Orchestrator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MsgERC20DeployedClaimResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMsgs } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MsgERC20DeployedClaimResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MsgERC20DeployedClaimResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipMsgs(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) < 0 { return ErrInvalidLengthMsgs } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipMsgs(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowMsgs } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowMsgs } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowMsgs } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthMsgs } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupMsgs } depth-- case 5: iNdEx += 4 default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthMsgs } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthMsgs = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowMsgs = fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupMsgs = fmt.Errorf("proto: unexpected end of group") )
encodeVarintMsgs
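The generated file above ends with the varint helpers: sovMsgs computes the encoded size of a uint64 (7 payload bits per byte) and encodeVarintMsgs writes the value backwards into an already-sized buffer, which is why every MarshalToSizedBuffer fills dAtA from the end. Below is a minimal, self-contained sketch of that scheme; the helper bodies mirror the generated code, while the main demo and its values are illustrative only:

package main

import (
    "fmt"
    "math/bits"
)

// Encoded size of x as a protobuf varint: 7 payload bits per byte.
func sovMsgs(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// Write v so that it ends immediately before dAtA[offset] and
// return the index where the encoding starts.
func encodeVarintMsgs(dAtA []byte, offset int, v uint64) int {
    offset -= sovMsgs(v)
    base := offset
    for v >= 1<<7 {
        dAtA[offset] = uint8(v&0x7f | 0x80) // low 7 bits, continuation bit set
        v >>= 7
        offset++
    }
    dAtA[offset] = uint8(v) // final byte, continuation bit clear
    return base
}

func main() {
    buf := make([]byte, 10)
    start := encodeVarintMsgs(buf, len(buf), 300)
    fmt.Printf("%x\n", buf[start:]) // prints "ac02": 300 = 0b10_0101100
}

Decoding reverses this, exactly as the Unmarshal loops above do: accumulate b&0x7F shifted left by 7 per byte until a byte with the high bit clear terminates the value.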
linechart.py
import plotly.graph_objects as go

from mainapp.app_settings import cell_length_meter

def getLineChart(
        data,
        timestamp,
        coordinates,
        colorScale,
        timeline,
        color_range,
        dragmode=False,
        quick_select_range=True,
        calibration_time=None,
        show_legend=False):
    if len(data) < 1:
        return {
            'data': [],
            'layout': go.Layout(title=go.layout.Title(text='No data found'))
        }

    x = data.iloc[:, 0].values
    linechart_fig = go.Figure()

    means = data.iloc[:, 1:].transpose().mean().transpose()
    var = data.iloc[:, 1:].transpose().std().transpose()

    # Add continuous error bars to the plot
    '''error_colors = ['#d9d9d9', '#bdbdbd', '#969696']
    for i in reversed(range(1, 4)):
        fill_color = error_colors[i-1]
        if data.shape[1] > 2:
            linechart_fig.add_trace(go.Scatter(
                x=x,
                y=means - i * var,
                mode='lines',
                line=dict(width=1, color='black'),
                showlegend=False
            ))
            linechart_fig.add_trace(go.Scatter(
                name='{} sigma'.format(i),
                x=x,
                y=means + i * var,
                mode='lines',
                marker=dict(color="#444"),
                line=dict(width=1, color='black'),
                fillcolor=fill_color,
                fill='tonexty'))'''

    # Add individual traces to the plot
    ys = data.shape[1]
    for y in range(1, ys):
        coord = coordinates[y-1]
        y = data.iloc[:, y].values
        linechart_fig.add_trace(go.Scatter(
            name='[{:2d},{:2d}]'.format(coord['x'], coord['y']),
            x=x,
            y=y,
            mode='lines+markers',
            line=dict(
                width=1,
                color='#292929'),
            marker=dict(
                size=2,
                color='#292929'),
            showlegend=show_legend
        ))

    # Add central values to the plot
    '''if data.shape[1] > 1:
        if data.shape[1] == 2:
            trace_name = '[{:d},{:d}]'.format(coordinates[0]['x'], coordinates[0]['y'])
        else:
            trace_name = 'Average'
        linechart_fig.add_trace(go.Scatter(
            name=trace_name,
            x=x,
            y=means,
            mode='lines+markers',
            line=dict(
                color='#292929',
                width=1,
            ),
            marker=dict(
                color='#292929',
                size=3,
            ),
            showlegend=True,
        ))'''

    # Add vertical line representing selected timestamp
    linechart_fig.add_shape(
        # Line Vertical
        dict(
            name='selected timestamp',
            type="line",
            yref='paper',
            x0=timestamp,
            y0=0,
            x1=timestamp,
            y1=1,
            line=dict(
                color="black",
                width=5
            ),
        ))

    # Add vertical line representing selected calibration
    if calibration_time is not None:
        linechart_fig.add_shape(
            # Line Vertical
            dict(
                name='calibration time',
                type="line",
                yref='paper',
                x0=calibration_time,
                y0=0,
                x1=calibration_time,
                y1=1,
                line=dict(
                    color="green",
                    width=5
                ),
            ))

    # Add colorbar to plot
    if color_range['min'] is not None and color_range['max'] is not None:
        min = color_range['min']
        max = color_range['max']
        width_of_line = (color_range['max'] - color_range['min']) / len(colorScale)
        for i in range(len(colorScale)):
            linechart_fig.add_shape(
                dict(
                    type="rect",
                    xref="paper",
                    yref="y",
                    x0=0,
                    y0=min + i*width_of_line,  # if i > 0 else 0 if min <= max else 12000,
                    x1=1,
                    y1=min + (i+1)*width_of_line,  # if i < len(colorScale)-1 else 12000 if min <= max else 0,
                    fillcolor=colorScale[i][1],
                    line_width=0,
                )
            )

    range_selector = None
    if quick_select_range:
        range_selector = dict(
            buttons=list([
                dict(count=1, label="1m", step="minute", stepmode="backward"),
                dict(count=1, label="1h", step="hour", stepmode="backward"),
                dict(count=1, label="1d", step="day", stepmode="backward"),
                dict(count=7, label="1w", step="day", stepmode="backward")
            ])
        )

    linechart_fig.update_layout(
        xaxis=dict(
            range=[timeline['start'], timeline['end']],
            type="date",
            linecolor='black',
            gridcolor='LightGrey',
            rangeselector=range_selector
        ),
        yaxis=dict(
            title='Resistivity (Ohm)',
            rangemode='tozero',
            linecolor='black',
            gridcolor='LightGrey',
            fixedrange=True
        ),
        margin=dict(
            l=15,
            r=0,
            t=30,
            b=5,
            pad=0
        ),
        plot_bgcolor='white',
        dragmode=dragmode,
        height=250,
    )

    return linechart_fig
opacity=0.6, layer="below",
ServoMount-with-two-FrontPlates.jscad
const { cuboid, cylinder, cylinderElliptic } = require('@jscad/modeling').primitives
const { translate, rotateX, rotateY, rotateZ } = require('@jscad/modeling').transforms
const { union, subtract } = require('@jscad/modeling').booleans
const { polygon } = require('@jscad/modeling').primitives
const { extrudeLinear } = require('@jscad/modeling').extrusions

const Epsilon = 0.1 // a small extra for imprecise printers

const Thickness = 2.0

const BoreholeDiameter = 3.0, BoreholeRadius = BoreholeDiameter/2
const BoreholeOffset = 8.0
const BoreholeDistance = BoreholeOffset - BoreholeDiameter
const BoreholeSpace = BoreholeDistance/2

const ServoBodyWidth = 13.0 // a bit larger than in reality
const ServoBodyLength = 23.0 // dto.
const ServoBodyHeight = 27.0 // dto.
const ServoFlangeOffset = 15.6 // dto.
const ServoFlangeThickness = 2.5 + Epsilon
const ServoFlangeLength = 5.0 + Epsilon
const ServoScrewOffset = 2.0
const ServoHornAxisOffset = 6.5 // more a guess than a measurement

const ScrewDiameter = 2.0, ScrewRadius = ScrewDiameter/2
const NutThickness = 3.0

const Angle90 = 90 * Math.PI/180
const Angle180 = 180 * Math.PI/180
const Angle270 = 270 * Math.PI/180

const main = () => {
  let Flange = subtract( // servo should be mounted from the top
    cuboid({ size:[ ServoFlangeLength, Thickness, ServoBodyWidth ] }),
    rotateX( Angle90, cylinder({ radius:ScrewRadius, height:Thickness+1.0 }) )
  )

  let leftFlange = translate([ -ServoFlangeLength/2, Thickness/2, ServoBodyWidth/2 ], Flange)
  let rightFlange = translate([ ServoFlangeLength/2+ServoBodyLength, Thickness/2, ServoBodyWidth/2 ], Flange)

  const BracketLength = ServoFlangeOffset + Thickness + NutThickness // gives room for a nut underneath the servo

  let Bracket = cuboid({ size:[ Thickness,BracketLength,ServoBodyWidth ] })

  let BoreholeCount = Math.floor((BracketLength-2*Thickness)/BoreholeOffset)
  for (let i = 0; i < BoreholeCount; i++) {
    Bracket = subtract(
      Bracket,
      translate(
        [ 0,(i-(BoreholeCount-1)/2)*BoreholeOffset,0 ],
        rotateY( Angle90, cylinder({ radius:BoreholeRadius+Epsilon, height:Thickness }) )
      )
    )
  }
  let extendedBracket = cuboid({ size:[ Thickness,BracketLength,ServoBodyWidth+BracketExtension ] })
  extendedBracket = translate([ 0,0,BracketExtension/2 ], extendedBracket)

  for (let i = 0; i < BoreholeCount; i++) {
    extendedBracket = subtract(
      extendedBracket,
      translate(
        [ 0,(i-(BoreholeCount-1)/2)*BoreholeOffset,0 ],
        rotateY( Angle90, cylinder({ radius:BoreholeRadius+Epsilon, height:Thickness }) )
      ),
      translate(
        [ 0,(i-(BoreholeCount-1)/2)*BoreholeOffset,BoreholeOffset ],
        rotateY( Angle90, cylinder({ radius:BoreholeRadius+Epsilon, height:Thickness }) )
      )
    )
  }
  extendedBracket = translate([0,BracketLength/2,ServoBodyWidth/2], extendedBracket)

  let leftBracket  = translate([ -0.5*Thickness-ServoFlangeLength,0,0 ], extendedBracket)
  let rightBracket = translate([ 0.5*Thickness+ServoFlangeLength+ServoBodyLength,0,0 ], extendedBracket)

  const BaseLength = ServoBodyLength+2*ServoFlangeLength

  let Base = cuboid({ size:[ BaseLength, Thickness, ServoBodyWidth ] })

  BoreholeCount = Math.floor(BaseLength/BoreholeOffset)
  for (let i = 0; i < BoreholeCount; i++) {
    Base = subtract(
      Base,
      translate(
        [ (i-(BoreholeCount-1)/2)*BoreholeOffset,0,0 ],
        rotateX( Angle90, cylinder({ radius:BoreholeRadius+Epsilon, height:Thickness }) )
      )
    )
  }
  Base = translate([ ServoBodyLength/2, BracketLength-Thickness/2, ServoBodyWidth/2 ], Base)

  let ServoMount = union(
    leftFlange, leftBracket,
    rightFlange, rightBracket,
    Base
  )

  return ServoMount
}

module.exports = { main }
Bracket = translate([0,BracketLength/2,ServoBodyWidth/2], Bracket) let BracketExtension = BoreholeOffset
init.go
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "os/exec"
    "path/filepath"
    "runtime"
    "strings"
    "time"
)

var (
    goos   = runtime.GOOS
    goarch = runtime.GOARCH
)

var cmdInit = &command{
    run:   runInit,
    Name:  "init",
    Usage: "[-openal dir]",
    Short: "build OpenAL for Android",
    Long: `
If an OpenAL source directory is specified with -openal, init will
build an Android version of OpenAL for use with gomobile build
and gomobile install.
`,
}

var initOpenAL string // -openal

func init() {
    cmdInit.flag.StringVar(&initOpenAL, "openal", "", "OpenAL source path")
}

func runInit(cmd *command) error {
    gopaths := filepath.SplitList(goEnv("GOPATH"))
    if len(gopaths) == 0 {
        return fmt.Errorf("GOPATH is not set")
    }
    gomobilepath = filepath.Join(gopaths[0], "pkg/gomobile")
    if buildX || buildN {
        fmt.Fprintln(xout, "GOMOBILE="+gomobilepath)
    }
    removeAll(gomobilepath)

    if err := mkdir(gomobilepath); err != nil {
        return err
    }

    if buildN {
        tmpdir = filepath.Join(gomobilepath, "work")
    } else {
        var err error
        tmpdir, err = ioutil.TempDir(gomobilepath, "work-")
        if err != nil {
            return err
        }
    }
    if buildX || buildN {
        fmt.Fprintln(xout, "WORK="+tmpdir)
    }
    defer func() {
        if buildWork {
            fmt.Printf("WORK=%s\n", tmpdir)
            return
        }
        removeAll(tmpdir)
    }()

    // Make sure gobind is up to date.
    if err := goInstall([]string{"golang.org/x/mobile/cmd/gobind"}, nil); err != nil {
        return err
    }

    if buildN {
        initOpenAL = "$OPENAL_PATH"
    } else {
        if initOpenAL != "" {
            var err error
            if initOpenAL, err = filepath.Abs(initOpenAL); err != nil {
                return err
            }
        }
    }
    if err := envInit(); err != nil {
        return err
    }

    start := time.Now()

    if err := installOpenAL(gomobilepath); err != nil {
        return err
    }

    if buildV {
        took := time.Since(start) / time.Second * time.Second
        fmt.Fprintf(os.Stderr, "\nDone, build took %s.\n", took)
    }
    return nil
}

func installOpenAL(gomobilepath string) error {
    if initOpenAL == "" {
        return nil
    }
    ndkRoot, err := ndkRoot()
    if err != nil {
        return err
    }
    var cmake string
    if buildN {
        cmake = "cmake"
    } else {
        sdkRoot := os.Getenv("ANDROID_HOME")
        if sdkRoot == "" {
            return nil
        }
        var err error
        cmake, err = exec.LookPath("cmake")
        if err != nil {
            cmakePath := filepath.Join(sdkRoot, "cmake")
            cmakeDir, err := os.Open(cmakePath)
            if err != nil {
                if os.IsNotExist(err) {
                    // Skip OpenAL install if the cmake package is not installed.
                    return errors.New("cmake was not found in the PATH. Please install it through the Android SDK manager.")
                }
                return err
            }
            defer cmakeDir.Close()
            // There might be multiple versions of CMake installed. Use any one for now.
            cmakeVers, err := cmakeDir.Readdirnames(1)
            if err != nil || len(cmakeVers) == 0 {
                return errors.New("cmake was not found in the PATH. Please install it through the Android SDK manager.")
            }
            cmake = filepath.Join(cmakePath, cmakeVers[0], "bin", "cmake")
        }
    }
    var alTmpDir string
    if buildN {
        alTmpDir = filepath.Join(gomobilepath, "work")
    } else {
        var err error
        alTmpDir, err = ioutil.TempDir(gomobilepath, "openal-release-")
        if err != nil {
            return err
        }
        defer removeAll(alTmpDir)
    }

    for _, f := range []string{"include/AL/al.h", "include/AL/alc.h"} {
        dst := filepath.Join(gomobilepath, f)
        src := filepath.Join(initOpenAL, f)
        if err := copyFile(dst, src); err != nil {
            return err
        }
    }

    for _, arch := range allArchs {
        t := ndk[arch]
        abi := t.arch
        if abi == "arm" {
            abi = "armeabi"
        }
        make := filepath.Join(ndkRoot, "prebuilt", archNDK(), "bin", "make")
        // Split android-XX to get the api version.
        buildDir := alTmpDir + "/build/" + abi
        if err := mkdir(buildDir); err != nil {
            return err
        }
        cmd := exec.Command(cmake,
            initOpenAL,
            "-DCMAKE_TOOLCHAIN_FILE="+initOpenAL+"/XCompile-Android.txt",
            "-DHOST="+t.ClangPrefix())
        cmd.Dir = buildDir
        tcPath := filepath.Join(ndkRoot, "toolchains", "llvm", "prebuilt", archNDK(), "bin")
        if !buildN {
            orgPath := os.Getenv("PATH")
            cmd.Env = []string{"PATH=" + tcPath + string(os.PathListSeparator) + orgPath}
        }
        if err := runCmd(cmd); err != nil {
            return err
        }

        cmd = exec.Command(make)
        cmd.Dir = buildDir
        if err := runCmd(cmd); err != nil {
            return err
        }

        dst := filepath.Join(gomobilepath, "lib", t.abi, "libopenal.so")
        src := filepath.Join(alTmpDir, "build", abi, "libopenal.so")
        if err := copyFile(dst, src); err != nil {
            return err
        }
    }
    return nil
}

var commonPkgs = []string{
    "golang.org/x/mobile/gl",
    "golang.org/x/mobile/app",
    "golang.org/x/mobile/exp/app/debug",
}

func mkdir(dir string) error {
    if buildX || buildN {
        printcmd("mkdir -p %s", dir)
    }
    if buildN {
        return nil
    }
    return os.MkdirAll(dir, 0755)
}

func symlink(src, dst string) error {
    if buildX || buildN {
        printcmd("ln -s %s %s", src, dst)
    }
    if buildN {
        return nil
    }
    if goos == "windows" {
        return doCopyAll(dst, src)
    }
    return os.Symlink(src, dst)
}

func rm(name string) error {
    if buildX || buildN {
        printcmd("rm %s", name)
    }
    if buildN {
        return nil
    }
    return os.Remove(name)
}

func doCopyAll(dst, src string) error {
    return filepath.Walk(src, func(path string, info os.FileInfo, errin error) (err error) {
        if errin != nil {
            return errin
        }
        prefixLen := len(src)
        if len(path) > prefixLen {
            prefixLen++ // file separator
        }
        outpath := filepath.Join(dst, path[prefixLen:])
        if info.IsDir() {
            return os.Mkdir(outpath, 0755)
        }
        in, err := os.Open(path)
        if err != nil {
            return err
        }
        defer in.Close()
        out, err := os.OpenFile(outpath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, info.Mode())
        if err != nil {
            return err
        }
        defer func() {
            if errc := out.Close(); err == nil {
                err = errc
            }
        }()
        _, err = io.Copy(out, in)
        return err
    })
}

func removeAll(path string) error {
    if buildX || buildN {
        printcmd(`rm -r -f "%s"`, path)
    }
    if buildN {
        return nil
    }

    // os.RemoveAll behaves differently in windows.
    // http://golang.org/issues/9606
    if goos == "windows" {
        resetReadOnlyFlagAll(path)
    }

    return os.RemoveAll(path)
}

func resetReadOnlyFlagAll(path string) error {
    fi, err := os.Stat(path)
    if err != nil {
        return err
    }
    if !fi.IsDir() {
        return os.Chmod(path, 0666)
    }
    fd, err := os.Open(path)
    if err != nil {
        return err
    }
    defer fd.Close()

    names, _ := fd.Readdirnames(-1)
    for _, name := range names {
        resetReadOnlyFlagAll(path + string(filepath.Separator) + name)
    }
    return nil
}

func goEnv(name string) string {
    if val := os.Getenv(name); val != "" {
        return val
    }
    val, err := exec.Command(goBin(), "env", name).Output()
    if err != nil {
        panic(err) // the Go tool was tested to work earlier
    }
    return strings.TrimSpace(string(val))
}

func runCmd(cmd *exec.Cmd) error {
    if buildX || buildN {
        dir := ""
        if cmd.Dir != "" {
            dir = "PWD=" + cmd.Dir + " "
        }
        env := strings.Join(cmd.Env, " ")
            env += " "
        }
        args := make([]string, len(cmd.Args))
        copy(args, cmd.Args)
        if args[0] == goBin() {
            args[0] = "go"
        }
        printcmd("%s%s%s", dir, env, strings.Join(args, " "))
    }

    buf := new(bytes.Buffer)
    buf.WriteByte('\n')
    if buildV {
        cmd.Stdout = os.Stdout
        cmd.Stderr = os.Stderr
    } else {
        cmd.Stdout = buf
        cmd.Stderr = buf
    }

    if buildWork {
        if goos == "windows" {
            cmd.Env = append(cmd.Env, `TEMP=`+tmpdir)
            cmd.Env = append(cmd.Env, `TMP=`+tmpdir)
        } else {
            cmd.Env = append(cmd.Env, `TMPDIR=`+tmpdir)
        }
    }

    if !buildN {
        cmd.Env = environ(cmd.Env)
        if err := cmd.Run(); err != nil {
            return fmt.Errorf("%s failed: %v%s", strings.Join(cmd.Args, " "), err, buf)
        }
    }
    return nil
}
if env != "" {
collector.rs
// Copyright 2015-2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use super::*; use dep_graph::{DepGraph, DepKind, DepNodeIndex}; use hir::def_id::{LOCAL_CRATE, CrateNum}; use hir::intravisit::{Visitor, NestedVisitorMap}; use hir::svh::Svh; use ich::Fingerprint; use middle::cstore::CrateStore; use session::CrateDisambiguator; use std::iter::repeat; use syntax::ast::{NodeId, CRATE_NODE_ID}; use syntax::codemap::CodeMap; use syntax_pos::Span; use ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; /// A Visitor that walks over the HIR and collects Nodes into a HIR map pub(super) struct NodeCollector<'a, 'hir> { /// The crate krate: &'hir Crate, /// The node map map: Vec<MapEntry<'hir>>, /// The parent of this node parent_node: NodeId, // These fields keep track of the currently relevant DepNodes during // the visitor's traversal. current_dep_node_owner: DefIndex, current_signature_dep_index: DepNodeIndex, current_full_dep_index: DepNodeIndex, currently_in_body: bool, dep_graph: &'a DepGraph, definitions: &'a definitions::Definitions, hcx: StableHashingContext<'a>, // We are collecting DepNode::HirBody hashes here so we can compute the // crate hash from then later on. hir_body_nodes: Vec<(DefPathHash, DepNodeIndex)>, } impl<'a, 'hir> NodeCollector<'a, 'hir> { pub(super) fn root(krate: &'hir Crate, dep_graph: &'a DepGraph, definitions: &'a definitions::Definitions, hcx: StableHashingContext<'a>) -> NodeCollector<'a, 'hir> { let root_mod_def_path_hash = definitions.def_path_hash(CRATE_DEF_INDEX); // Allocate DepNodes for the root module let (root_mod_sig_dep_index, root_mod_full_dep_index); { let Crate { ref module, // Crate attributes are not copied over to the root `Mod`, so hash // them explicitly here. 
ref attrs, span, // These fields are handled separately: exported_macros: _, items: _, trait_items: _, impl_items: _, bodies: _, trait_impls: _, trait_auto_impl: _, body_ids: _, } = *krate; root_mod_sig_dep_index = dep_graph.with_task( root_mod_def_path_hash.to_dep_node(DepKind::Hir), &hcx, HirItemLike { item_like: (module, attrs, span), hash_bodies: false }, identity_fn ).1; root_mod_full_dep_index = dep_graph.with_task( root_mod_def_path_hash.to_dep_node(DepKind::HirBody), &hcx, HirItemLike { item_like: (module, attrs, span), hash_bodies: true }, identity_fn ).1; } { dep_graph.with_task( DepNode::new_no_params(DepKind::AllLocalTraitImpls), &hcx, &krate.trait_impls, identity_fn ); } let hir_body_nodes = vec![(root_mod_def_path_hash, root_mod_full_dep_index)]; let mut collector = NodeCollector { krate, map: vec![], parent_node: CRATE_NODE_ID, current_signature_dep_index: root_mod_sig_dep_index, current_full_dep_index: root_mod_full_dep_index, current_dep_node_owner: CRATE_DEF_INDEX, currently_in_body: false, dep_graph, definitions, hcx, hir_body_nodes, }; collector.insert_entry(CRATE_NODE_ID, RootCrate(root_mod_sig_dep_index)); collector } pub(super) fn finalize_and_compute_crate_hash(mut self, crate_disambiguator: CrateDisambiguator, cstore: &dyn CrateStore, codemap: &CodeMap, commandline_args_hash: u64) -> (Vec<MapEntry<'hir>>, Svh) { self .hir_body_nodes .sort_unstable_by(|&(ref d1, _), &(ref d2, _)| d1.cmp(d2)); let node_hashes = self .hir_body_nodes .iter() .fold(Fingerprint::ZERO, |fingerprint , &(def_path_hash, dep_node_index)| { fingerprint.combine( def_path_hash.0.combine(self.dep_graph.fingerprint_of(dep_node_index)) ) }); let mut upstream_crates: Vec<_> = cstore.crates_untracked().iter().map(|&cnum| { let name = cstore.crate_name_untracked(cnum).as_str(); let disambiguator = cstore.crate_disambiguator_untracked(cnum) .to_fingerprint(); let hash = cstore.crate_hash_untracked(cnum); (name, disambiguator, hash) }).collect(); upstream_crates.sort_unstable_by(|&(name1, dis1, _), &(name2, dis2, _)| { (name1, dis1).cmp(&(name2, dis2)) }); // We hash the final, remapped names of all local source files so we // don't have to include the path prefix remapping commandline args. // If we included the full mapping in the SVH, we could only have // reproducible builds by compiling from the same directory. So we just // hash the result of the mapping instead of the mapping itself. 
let mut source_file_names: Vec<_> = codemap .files() .iter() .filter(|filemap| CrateNum::from_u32(filemap.crate_of_origin) == LOCAL_CRATE) .map(|filemap| filemap.name_hash) .collect(); source_file_names.sort_unstable(); let (_, crate_dep_node_index) = self .dep_graph .with_task(DepNode::new_no_params(DepKind::Krate), &self.hcx, (((node_hashes, upstream_crates), source_file_names), (commandline_args_hash, crate_disambiguator.to_fingerprint())), identity_fn); let svh = Svh::new(self.dep_graph .fingerprint_of(crate_dep_node_index) .to_smaller_hash()); (self.map, svh) } fn insert_entry(&mut self, id: NodeId, entry: MapEntry<'hir>) { debug!("hir_map: {:?} => {:?}", id, entry); let len = self.map.len(); if id.as_usize() >= len { self.map.extend(repeat(NotPresent).take(id.as_usize() - len + 1)); } self.map[id.as_usize()] = entry; } fn insert(&mut self, id: NodeId, node: Node<'hir>) { let parent = self.parent_node; let dep_node_index = if self.currently_in_body { self.current_full_dep_index } else { self.current_signature_dep_index }; let entry = match node { NodeItem(n) => EntryItem(parent, dep_node_index, n), NodeForeignItem(n) => EntryForeignItem(parent, dep_node_index, n), NodeTraitItem(n) => EntryTraitItem(parent, dep_node_index, n), NodeImplItem(n) => EntryImplItem(parent, dep_node_index, n), NodeVariant(n) => EntryVariant(parent, dep_node_index, n), NodeField(n) => EntryField(parent, dep_node_index, n), NodeExpr(n) => EntryExpr(parent, dep_node_index, n), NodeStmt(n) => EntryStmt(parent, dep_node_index, n), NodeTy(n) => EntryTy(parent, dep_node_index, n), NodeTraitRef(n) => EntryTraitRef(parent, dep_node_index, n), NodeBinding(n) => EntryBinding(parent, dep_node_index, n), NodePat(n) => EntryPat(parent, dep_node_index, n), NodeBlock(n) => EntryBlock(parent, dep_node_index, n), NodeStructCtor(n) => EntryStructCtor(parent, dep_node_index, n), NodeLifetime(n) => EntryLifetime(parent, dep_node_index, n), NodeTyParam(n) => EntryTyParam(parent, dep_node_index, n), NodeVisibility(n) => EntryVisibility(parent, dep_node_index, n), NodeLocal(n) => EntryLocal(parent, dep_node_index, n), NodeMacroDef(n) => EntryMacroDef(dep_node_index, n), }; // Make sure that the DepNode of some node coincides with the HirId // owner of that node. 
if cfg!(debug_assertions) { let hir_id_owner = self.definitions.node_to_hir_id(id).owner; if hir_id_owner != self.current_dep_node_owner { let node_str = match self.definitions.opt_def_index(id) { Some(def_index) => { self.definitions.def_path(def_index).to_string_no_crate() } None => format!("{:?}", node) }; bug!("inconsistent DepNode for `{}`: \ current_dep_node_owner={}, hir_id.owner={}", node_str, self.definitions .def_path(self.current_dep_node_owner) .to_string_no_crate(), self.definitions.def_path(hir_id_owner).to_string_no_crate()) } } self.insert_entry(id, entry); } fn with_parent<F: FnOnce(&mut Self)>(&mut self, parent_id: NodeId, f: F) { let parent_node = self.parent_node; self.parent_node = parent_id; f(self); self.parent_node = parent_node; } fn with_dep_node_owner<T: HashStable<StableHashingContext<'a>>, F: FnOnce(&mut Self)>(&mut self, dep_node_owner: DefIndex, item_like: &T, f: F) { let prev_owner = self.current_dep_node_owner; let prev_signature_dep_index = self.current_signature_dep_index; let prev_full_dep_index = self.current_full_dep_index; let prev_in_body = self.currently_in_body; let def_path_hash = self.definitions.def_path_hash(dep_node_owner); self.current_signature_dep_index = self.dep_graph.with_task( def_path_hash.to_dep_node(DepKind::Hir), &self.hcx, HirItemLike { item_like, hash_bodies: false }, identity_fn ).1; self.current_full_dep_index = self.dep_graph.with_task( def_path_hash.to_dep_node(DepKind::HirBody), &self.hcx, HirItemLike { item_like, hash_bodies: true }, identity_fn ).1; self.hir_body_nodes.push((def_path_hash, self.current_full_dep_index)); self.current_dep_node_owner = dep_node_owner; self.currently_in_body = false; f(self); self.currently_in_body = prev_in_body; self.current_dep_node_owner = prev_owner; self.current_full_dep_index = prev_full_dep_index; self.current_signature_dep_index = prev_signature_dep_index; } } impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { /// Because we want to track parent items and so forth, enable /// deep walking so that we walk nested items in the context of /// their outer items. fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'hir> { panic!("visit_nested_xxx must be manually implemented in this visitor") } fn visit_nested_item(&mut self, item: ItemId) { debug!("visit_nested_item: {:?}", item); self.visit_item(self.krate.item(item.id)); } fn visit_nested_trait_item(&mut self, item_id: TraitItemId) { self.visit_trait_item(self.krate.trait_item(item_id)); } fn visit_nested_impl_item(&mut self, item_id: ImplItemId)
fn visit_nested_body(&mut self, id: BodyId) { let prev_in_body = self.currently_in_body; self.currently_in_body = true; self.visit_body(self.krate.body(id)); self.currently_in_body = prev_in_body; } fn visit_item(&mut self, i: &'hir Item) { debug!("visit_item: {:?}", i); debug_assert_eq!(i.hir_id.owner, self.definitions.opt_def_index(i.id).unwrap()); self.with_dep_node_owner(i.hir_id.owner, i, |this| { this.insert(i.id, NodeItem(i)); this.with_parent(i.id, |this| { match i.node { ItemStruct(ref struct_def, _) => { // If this is a tuple-like struct, register the constructor. if !struct_def.is_struct() { this.insert(struct_def.id(), NodeStructCtor(struct_def)); } } _ => {} } intravisit::walk_item(this, i); }); }); } fn visit_foreign_item(&mut self, foreign_item: &'hir ForeignItem) { self.insert(foreign_item.id, NodeForeignItem(foreign_item)); self.with_parent(foreign_item.id, |this| { intravisit::walk_foreign_item(this, foreign_item); }); } fn visit_generics(&mut self, generics: &'hir Generics) { for ty_param in generics.ty_params() { self.insert(ty_param.id, NodeTyParam(ty_param)); } intravisit::walk_generics(self, generics); } fn visit_trait_item(&mut self, ti: &'hir TraitItem) { debug_assert_eq!(ti.hir_id.owner, self.definitions.opt_def_index(ti.id).unwrap()); self.with_dep_node_owner(ti.hir_id.owner, ti, |this| { this.insert(ti.id, NodeTraitItem(ti)); this.with_parent(ti.id, |this| { intravisit::walk_trait_item(this, ti); }); }); } fn visit_impl_item(&mut self, ii: &'hir ImplItem) { debug_assert_eq!(ii.hir_id.owner, self.definitions.opt_def_index(ii.id).unwrap()); self.with_dep_node_owner(ii.hir_id.owner, ii, |this| { this.insert(ii.id, NodeImplItem(ii)); this.with_parent(ii.id, |this| { intravisit::walk_impl_item(this, ii); }); }); } fn visit_pat(&mut self, pat: &'hir Pat) { let node = if let PatKind::Binding(..) = pat.node { NodeBinding(pat) } else { NodePat(pat) }; self.insert(pat.id, node); self.with_parent(pat.id, |this| { intravisit::walk_pat(this, pat); }); } fn visit_expr(&mut self, expr: &'hir Expr) { self.insert(expr.id, NodeExpr(expr)); self.with_parent(expr.id, |this| { intravisit::walk_expr(this, expr); }); } fn visit_stmt(&mut self, stmt: &'hir Stmt) { let id = stmt.node.id(); self.insert(id, NodeStmt(stmt)); self.with_parent(id, |this| { intravisit::walk_stmt(this, stmt); }); } fn visit_ty(&mut self, ty: &'hir Ty) { self.insert(ty.id, NodeTy(ty)); self.with_parent(ty.id, |this| { intravisit::walk_ty(this, ty); }); } fn visit_trait_ref(&mut self, tr: &'hir TraitRef) { self.insert(tr.ref_id, NodeTraitRef(tr)); self.with_parent(tr.ref_id, |this| { intravisit::walk_trait_ref(this, tr); }); } fn visit_fn(&mut self, fk: intravisit::FnKind<'hir>, fd: &'hir FnDecl, b: BodyId, s: Span, id: NodeId) { assert_eq!(self.parent_node, id); intravisit::walk_fn(self, fk, fd, b, s, id); } fn visit_block(&mut self, block: &'hir Block) { self.insert(block.id, NodeBlock(block)); self.with_parent(block.id, |this| { intravisit::walk_block(this, block); }); } fn visit_local(&mut self, l: &'hir Local) { self.insert(l.id, NodeLocal(l)); self.with_parent(l.id, |this| { intravisit::walk_local(this, l) }) } fn visit_lifetime(&mut self, lifetime: &'hir Lifetime) { self.insert(lifetime.id, NodeLifetime(lifetime)); } fn visit_vis(&mut self, visibility: &'hir Visibility) { match *visibility { Visibility::Public | Visibility::Crate | Visibility::Inherited => {} Visibility::Restricted { id, .. 
} => { self.insert(id, NodeVisibility(visibility)); self.with_parent(id, |this| { intravisit::walk_vis(this, visibility); }); } } } fn visit_macro_def(&mut self, macro_def: &'hir MacroDef) { let def_index = self.definitions.opt_def_index(macro_def.id).unwrap(); self.with_dep_node_owner(def_index, macro_def, |this| { this.insert(macro_def.id, NodeMacroDef(macro_def)); }); } fn visit_variant(&mut self, v: &'hir Variant, g: &'hir Generics, item_id: NodeId) { let id = v.node.data.id(); self.insert(id, NodeVariant(v)); self.with_parent(id, |this| { intravisit::walk_variant(this, v, g, item_id); }); } fn visit_struct_field(&mut self, field: &'hir StructField) { self.insert(field.id, NodeField(field)); self.with_parent(field.id, |this| { intravisit::walk_struct_field(this, field); }); } fn visit_trait_item_ref(&mut self, ii: &'hir TraitItemRef) { // Do not visit the duplicate information in TraitItemRef. We want to // map the actual nodes, not the duplicate ones in the *Ref. let TraitItemRef { id, name: _, kind: _, span: _, defaultness: _, } = *ii; self.visit_nested_trait_item(id); } fn visit_impl_item_ref(&mut self, ii: &'hir ImplItemRef) { // Do not visit the duplicate information in ImplItemRef. We want to // map the actual nodes, not the duplicate ones in the *Ref. let ImplItemRef { id, name: _, kind: _, span: _, vis: _, defaultness: _, } = *ii; self.visit_nested_impl_item(id); } } // We use this with DepGraph::with_task(). Since we are handling only input // values here, the "task" computing them just passes them through. fn identity_fn<T>(_: &StableHashingContext, item_like: T) -> T { item_like } // This is a wrapper structure that allows determining if span values within // the wrapped item should be hashed or not. struct HirItemLike<T> { item_like: T, hash_bodies: bool, } impl<'a, 'hir, T> HashStable<StableHashingContext<'hir>> for HirItemLike<T> where T: HashStable<StableHashingContext<'hir>> { fn hash_stable<W: StableHasherResult>(&self, hcx: &mut StableHashingContext<'hir>, hasher: &mut StableHasher<W>) { hcx.while_hashing_hir_bodies(self.hash_bodies, |hcx| { self.item_like.hash_stable(hcx, hasher); }); } }
{ self.visit_impl_item(self.krate.impl_item(item_id)); }
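// Hedged standalone sketch (not rustc code): the save/restore discipline
// used by with_parent and with_dep_node_owner above. The current state is
// stashed, the closure runs with the new value, and the old value is put
// back afterwards so sibling nodes still see the original parent.
struct Collector {
    parent_node: u32,
}

impl Collector {
    fn with_parent<F: FnOnce(&mut Self)>(&mut self, parent_id: u32, f: F) {
        let saved = self.parent_node; // save
        self.parent_node = parent_id; // children are inserted under this id
        f(self);
        self.parent_node = saved; // restore on the way out
    }
}

fn main() {
    let mut c = Collector { parent_node: 0 };
    c.with_parent(7, |c| assert_eq!(c.parent_node, 7));
    assert_eq!(c.parent_node, 0); // previous parent is back in place
}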
info.rs
// Copyright 2019-2021 Tauri Programme within The Commons Conservancy // SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: MIT use crate::helpers::{ app_paths::{app_dir, tauri_dir}, config::get as get_config, framework::infer_from_package_json as infer_framework, }; use serde::Deserialize; use std::{ collections::HashMap, fs::{read_dir, read_to_string}, panic, path::{Path, PathBuf}, process::Command, }; #[derive(Deserialize)] struct YarnVersionInfo { data: Vec<String>, } #[derive(Clone, Deserialize)] struct CargoLockPackage { name: String, version: String, } #[derive(Deserialize)] struct CargoLock { package: Vec<CargoLockPackage>, } #[derive(Deserialize)] struct JsCliVersionMetadata { version: String, node: String, } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] struct VersionMetadata { #[serde(rename = "cli.js")] js_cli: JsCliVersionMetadata, } #[derive(Clone, Deserialize)] struct CargoManifestDependencyPackage { version: Option<String>, path: Option<PathBuf>, #[serde(default)] features: Vec<String>, } #[derive(Clone, Deserialize)] #[serde(untagged)] enum CargoManifestDependency { Version(String), Package(CargoManifestDependencyPackage), } #[derive(Deserialize)] struct CargoManifestPackage { version: String, } #[derive(Deserialize)] struct CargoManifest { package: CargoManifestPackage, dependencies: HashMap<String, CargoManifestDependency>, } #[derive(Default)] pub struct Info; fn crate_latest_version(name: &str) -> Option<String> { let url = format!("https://docs.rs/crate/{}/", name); match ureq::get(&url).call() { Ok(response) => match (response.status(), response.header("location")) { (302, Some(location)) => Some(location.replace(&url, "")), _ => None, }, Err(_) => None, } } fn npm_latest_version(use_yarn: bool, name: &str) -> crate::Result<Option<String>> { let mut cmd; if use_yarn { #[cfg(target_os = "windows")] { cmd = Command::new("cmd"); cmd.arg("/c").arg("yarn"); } #[cfg(not(target_os = "windows"))] { cmd = Command::new("yarn") } let output = cmd .arg("info") .arg(name) .args(&["version", "--json"]) .output()?; if output.status.success() { let stdout = String::from_utf8_lossy(&output.stdout); let info: YarnVersionInfo = serde_json::from_str(&stdout)?; Ok(Some(info.data.last().unwrap().to_string())) } else { Ok(None) } } else { #[cfg(target_os = "windows")] { cmd = Command::new("cmd"); cmd.arg("/c").arg("npm"); } #[cfg(not(target_os = "windows"))] { cmd = Command::new("npm") } let output = cmd.arg("show").arg(name).arg("version").output()?; if output.status.success() { let stdout = String::from_utf8_lossy(&output.stdout); Ok(Some(stdout.replace("\n", ""))) } else { Ok(None) } } } fn npm_package_version<P: AsRef<Path>>( use_yarn: bool, name: &str, app_dir: P, ) -> crate::Result<Option<String>> { let mut cmd; let output = if use_yarn { #[cfg(target_os = "windows")] { cmd = Command::new("cmd"); cmd.arg("/c").arg("yarn"); } #[cfg(not(target_os = "windows"))] { cmd = Command::new("yarn") } cmd .args(&["list", "--pattern"]) .arg(name) .args(&["--depth", "0"]) .current_dir(app_dir) .output()? } else { #[cfg(target_os = "windows")] { cmd = Command::new("cmd"); cmd.arg("/c").arg("npm"); } #[cfg(not(target_os = "windows"))] { cmd = Command::new("npm") } cmd .arg("list") .arg(name) .args(&["version", "--depth", "0"]) .current_dir(app_dir) .output()? 
}; if output.status.success() { let stdout = String::from_utf8_lossy(&output.stdout); let regex = regex::Regex::new("@([\\da-zA-Z\\-\\.]+)").unwrap(); Ok( regex .captures_iter(&stdout) .last() .and_then(|cap| cap.get(1).map(|v| v.as_str().to_string())), ) } else { Ok(None) } } fn get_version(command: &str, args: &[&str]) -> crate::Result<Option<String>> { let mut cmd; #[cfg(target_os = "windows")] { cmd = Command::new("cmd"); cmd.arg("/c").arg(command); } #[cfg(not(target_os = "windows"))] { cmd = Command::new(command) } let output = cmd.args(args).arg("--version").output()?; let version = if output.status.success() { Some( String::from_utf8_lossy(&output.stdout) .replace("\n", "") .replace("\r", ""), ) } else { None }; Ok(version) } #[cfg(windows)] fn webview2_version() -> crate::Result<Option<String>>
struct InfoBlock { section: bool, key: &'static str, value: Option<String>, suffix: Option<String>, } impl InfoBlock { fn new(key: &'static str) -> Self { Self { section: false, key, value: None, suffix: None, } } fn section(mut self) -> Self { self.section = true; self } fn value<V: Into<Option<String>>>(mut self, value: V) -> Self { self.value = value.into(); self } fn suffix<S: Into<Option<String>>>(mut self, suffix: S) -> Self { self.suffix = suffix.into(); self } fn display(&self) { if self.section { println!(); } print!("{}", self.key); if let Some(value) = &self.value { print!(" - {}", value); } if let Some(suffix) = &self.suffix { print!("{}", suffix); } println!(); } } struct VersionBlock { section: bool, key: &'static str, version: Option<String>, target_version: Option<String>, } impl VersionBlock { fn new<V: Into<Option<String>>>(key: &'static str, version: V) -> Self { Self { section: false, key, version: version.into(), target_version: None, } } fn target_version<V: Into<Option<String>>>(mut self, version: V) -> Self { self.target_version = version.into(); self } fn display(&self) { if self.section { println!(); } print!("{}", self.key); if let Some(version) = &self.version { print!(" - {}", version); } else { print!(" - Not installed"); } if let (Some(version), Some(target_version)) = (&self.version, &self.target_version) { let version = semver::Version::parse(version).unwrap(); let target_version = semver::Version::parse(target_version).unwrap(); if version < target_version { print!(" (outdated, latest: {})", target_version); } } println!(); } } impl Info { pub fn new() -> Self { Default::default() } pub fn run(self) -> crate::Result<()> { let os_info = os_info::get(); InfoBlock { section: true, key: "Operating System", value: Some(format!( "{}, version {} {:?}", os_info.os_type(), os_info.version(), os_info.bitness() )), suffix: None, } .display(); #[cfg(windows)] VersionBlock::new("Webview2", webview2_version().unwrap_or_default()).display(); let hook = panic::take_hook(); panic::set_hook(Box::new(|_info| { // do nothing })); let app_dir = panic::catch_unwind(app_dir).map(Some).unwrap_or_default(); panic::set_hook(hook); let use_yarn = app_dir .map(|dir| dir.join("yarn.lock").exists()) .unwrap_or_default(); if let Some(node_version) = get_version("node", &[]).unwrap_or_default() { InfoBlock::new("Node.js environment").section().display(); let metadata = serde_json::from_str::<VersionMetadata>(include_str!("../metadata.json"))?; VersionBlock::new( " Node.js", node_version.chars().skip(1).collect::<String>(), ) .target_version(metadata.js_cli.node.replace(">= ", "")) .display(); VersionBlock::new(" @tauri-apps/cli", metadata.js_cli.version) .target_version(npm_latest_version(use_yarn, "@tauri-apps/cli").unwrap_or_default()) .display(); if let Some(app_dir) = &app_dir { VersionBlock::new( " @tauri-apps/api", npm_package_version(use_yarn, "@tauri-apps/api", app_dir).unwrap_or_default(), ) .target_version(npm_latest_version(use_yarn, "@tauri-apps/api").unwrap_or_default()) .display(); } InfoBlock::new("Global packages").section().display(); VersionBlock::new(" npm", get_version("npm", &[]).unwrap_or_default()).display(); VersionBlock::new(" yarn", get_version("yarn", &[]).unwrap_or_default()).display(); } InfoBlock::new("Rust environment").section().display(); VersionBlock::new( " rustc", get_version("rustc", &[]).unwrap_or_default().map(|v| { let mut s = v.split(' '); s.next(); s.next().unwrap().to_string() }), ) .display(); VersionBlock::new( " cargo", get_version("cargo", 
&[]).unwrap_or_default().map(|v| { let mut s = v.split(' '); s.next(); s.next().unwrap().to_string() }), ) .display(); if let Some(app_dir) = app_dir { InfoBlock::new("App directory structure") .section() .display(); for entry in read_dir(app_dir)? { let entry = entry?; if entry.path().is_dir() { println!("/{}", entry.path().file_name().unwrap().to_string_lossy()); } } InfoBlock::new("App").section().display(); let tauri_dir = tauri_dir(); let manifest: Option<CargoManifest> = if let Ok(manifest_contents) = read_to_string(tauri_dir.join("Cargo.toml")) { toml::from_str(&manifest_contents).ok() } else { None }; let lock: Option<CargoLock> = if let Ok(lock_contents) = read_to_string(tauri_dir.join("Cargo.lock")) { toml::from_str(&lock_contents).ok() } else { None }; let tauri_lock_packages: Vec<CargoLockPackage> = lock .as_ref() .map(|lock| { lock .package .iter() .filter(|p| p.name == "tauri") .cloned() .collect() }) .unwrap_or_default(); let (tauri_version_string, found_tauri_versions) = match (&manifest, &lock, tauri_lock_packages.len()) { (Some(_manifest), Some(_lock), 1) => { let tauri_lock_package = tauri_lock_packages.first().unwrap(); ( tauri_lock_package.version.clone(), vec![tauri_lock_package.version.clone()], ) } (None, Some(_lock), 1) => { let tauri_lock_package = tauri_lock_packages.first().unwrap(); ( format!("{} (no manifest)", tauri_lock_package.version), vec![tauri_lock_package.version.clone()], ) } _ => { let mut found_tauri_versions = Vec::new(); let manifest_version = match manifest.and_then(|m| m.dependencies.get("tauri").cloned()) { Some(tauri) => match tauri { CargoManifestDependency::Version(v) => { found_tauri_versions.push(v.clone()); v } CargoManifestDependency::Package(p) => { if let Some(v) = p.version { found_tauri_versions.push(v.clone()); v } else if let Some(p) = p.path { let manifest_path = tauri_dir.join(&p).join("Cargo.toml"); let v = match read_to_string(&manifest_path) .map_err(|_| ()) .and_then(|m| toml::from_str::<CargoManifest>(&m).map_err(|_| ())) { Ok(manifest) => manifest.package.version, Err(_) => "unknown version".to_string(), }; format!("path:{:?} [{}]", p, v) } else { "unknown manifest".to_string() } } }, None => "no manifest".to_string(), }; let lock_version = match (lock, tauri_lock_packages.is_empty()) { (Some(_lock), true) => tauri_lock_packages .iter() .map(|p| p.version.clone()) .collect::<Vec<String>>() .join(", "), (Some(_lock), false) => "unknown lockfile".to_string(), _ => "no lockfile".to_string(), }; ( format!("{} ({})", manifest_version, lock_version), found_tauri_versions, ) } }; let tauri_version = found_tauri_versions .into_iter() .map(|v| semver::Version::parse(&v).unwrap()) .max(); let suffix = match (tauri_version, crate_latest_version("tauri")) { (Some(version), Some(target_version)) => { let target_version = semver::Version::parse(&target_version).unwrap(); if version < target_version { Some(format!(" (outdated, latest: {})", target_version)) } else { None } } _ => None, }; InfoBlock::new(" tauri.rs") .value(tauri_version_string) .suffix(suffix) .display(); if let Ok(config) = get_config(None) { let config_guard = config.lock().unwrap(); let config = config_guard.as_ref().unwrap(); InfoBlock::new(" build-type") .value(if config.tauri.bundle.active { "bundle".to_string() } else { "build".to_string() }) .display(); InfoBlock::new(" CSP") .value(if let Some(security) = &config.tauri.security { security.csp.clone().unwrap_or_else(|| "unset".to_string()) } else { "unset".to_string() }) .display(); InfoBlock::new(" distDir") 
.value(config.build.dist_dir.to_string()) .display(); InfoBlock::new(" devPath") .value(config.build.dev_path.to_string()) .display(); } if let Ok(package_json) = read_to_string(app_dir.join("package.json")) { let (framework, bundler) = infer_framework(&package_json); if let Some(framework) = framework { InfoBlock::new(" framework") .value(framework.to_string()) .display(); } if let Some(bundler) = bundler { InfoBlock::new(" bundler") .value(bundler.to_string()) .display(); } } else { println!("package.json not found"); } } Ok(()) } }
{ let output = Command::new("powershell") .args(&["-NoProfile", "-Command"]) .arg("Get-ItemProperty -Path 'HKLM:\\SOFTWARE\\WOW6432Node\\Microsoft\\EdgeUpdate\\Clients\\{F3017226-FE2A-4295-8BDF-00C3A9A7E4C5}' | ForEach-Object {$_.pv}") .output()?; let version = if output.status.success() { Some(String::from_utf8_lossy(&output.stdout).replace("\n", "")) } else { // check 32bit installation let output = Command::new("powershell") .args(&["-NoProfile", "-Command"]) .arg("Get-ItemProperty -Path 'HKLM:\\SOFTWARE\\Microsoft\\EdgeUpdate\\Clients\\{F3017226-FE2A-4295-8BDF-00C3A9A7E4C5}' | ForEach-Object {$_.pv}") .output()?; if output.status.success() { Some(String::from_utf8_lossy(&output.stdout).replace("\n", "")) } else { None } }; Ok(version) }
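// Hedged sketch of the check performed by VersionBlock::display above: both
// version strings are parsed with the semver crate (already used by this
// file) and the installed one is flagged when it sorts below the target.
// unwrap() mirrors the file; real code might handle parse errors instead.
fn outdated(installed: &str, latest: &str) -> bool {
    let installed = semver::Version::parse(installed).unwrap();
    let latest = semver::Version::parse(latest).unwrap();
    installed < latest // semver ordering, not plain string comparison
}

fn main() {
    assert!(outdated("1.0.0", "1.2.3"));
    assert!(!outdated("1.2.3", "1.0.0"));
}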
main.ts
// import a CSS module import './main.css'; interface Person { firstName: string; lastName: string; } function
(user: Person) {
  // non-null assertion: assumes the #message element exists in the page,
  // so the assignment type-checks under strictNullChecks as well
  document.getElementById('message')!.innerText = `Hello ${user.firstName} ${user.lastName}`;
}

export default main;
main
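// Hedged usage sketch for the completed `main` above: the #message element
// id comes from the file, the sample Person is invented.
const user: Person = { firstName: 'Ada', lastName: 'Lovelace' };
main(user); // sets #message to "Hello Ada Lovelace"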
filter.go
/* Copyright 2017 The Nuclio Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package functiontemplates import "strings" type Filter struct { Contains string } func (ftf *Filter) functionTemplatePasses(template *FunctionTemplate) bool { if ftf.empty() { return true } stringsToSearch := []string{ template.Name, string(template.serializedTemplate), } for _, stringToSearch := range stringsToSearch { if strings.Contains(stringToSearch, ftf.Contains) { return true
return false } func (ftf *Filter) empty() bool { return ftf.Contains == "" }
} }
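// Hedged standalone sketch (invented helper) of the matching rule above: an
// empty needle passes everything, mirroring Filter.empty(); otherwise a
// substring hit on any candidate field is enough.
package main

import (
	"fmt"
	"strings"
)

func anyContains(needle string, haystacks ...string) bool {
	if needle == "" {
		return true // no constraint configured, everything passes
	}
	for _, h := range haystacks {
		if strings.Contains(h, needle) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(anyContains("hello", "hello-world", "{}")) // true: first field matches
	fmt.Println(anyContains("", "anything"))               // true: empty filter
}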
Grid.tsx
import React from 'react'; import classNames from 'classnames'; import { ucfirst } from '../utils'; import css from './Grid.module.css'; type PropsT = { align?: 'start' | 'end' | 'stretch' | 'center'; centered?: boolean; children: any; className?: string; el?: any; fluid?: boolean; justifyContent?: 'start' | 'end' | 'center'; seamless?: boolean; textAlign?: 'left' | 'right' | 'center'; }; export default function
({ align, centered, children, className, el: Element = 'div', justifyContent, fluid, seamless, textAlign = 'left', ...props }: PropsT) { return ( <Element className={classNames( css.grid, textAlign && css[`textAlign${ucfirst(textAlign)}`], align && css[`align${ucfirst(align)}`], justifyContent && css[`justifyContent${ucfirst(justifyContent)}`], className, { [css.fluid]: fluid, [css.seamless]: seamless, [css.centered]: centered, } )} {...props} > {children} </Element> ); }
Row
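// Hedged usage sketch for the Row component completed above; the prop names
// come from PropsT, the rendered content is invented.
export const example = (
  <Row el="section" align="center" justifyContent="end" fluid>
    <span>cell</span>
  </Row>
);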
override_string_type.rs
use crate::{config, env::Env, library::*}; use log::error; pub fn override_string_type_parameter( env: &Env, typ: TypeId, configured_parameters: &[&config::functions::Parameter], ) -> TypeId { let string_type = configured_parameters.iter().find_map(|p| p.string_type); apply(env, typ, string_type) } pub fn override_string_type_return( env: &Env, typ: TypeId, configured_functions: &[&config::functions::Function], ) -> TypeId { let string_type = configured_functions.iter().find_map(|f| f.ret.string_type); apply(env, typ, string_type) } fn
(env: &Env, type_id: TypeId, string_type: Option<config::StringType>) -> TypeId { let string_type = if let Some(string_type) = string_type { string_type } else { return type_id; }; let replace = { use crate::config::StringType::*; match string_type { Utf8 => TypeId::tid_utf8(), Filename => TypeId::tid_filename(), OsString => TypeId::tid_os_string(), } }; match *env.library.type_(type_id) { Type::Fundamental(Fundamental::Filename) => replace, Type::Fundamental(Fundamental::OsString) => replace, Type::Fundamental(Fundamental::Utf8) => replace, Type::CArray(inner_tid) if can_overriden_fundamental(env, inner_tid) => { Type::find_c_array(&env.library, replace, None) } _ => { error!( "Bad type {0} for string_type override", type_id.full_name(&env.library) ); type_id } } } fn can_overriden_fundamental(env: &Env, type_id: TypeId) -> bool { matches!( *env.library.type_(type_id), Type::Fundamental(Fundamental::Filename) | Type::Fundamental(Fundamental::OsString) | Type::Fundamental(Fundamental::Utf8) ) }
apply
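// Hedged sketch (invented helper) of the lookup behind
// override_string_type_parameter above: Iterator::find_map yields the first
// Some(_) produced, so the earliest configuration entry that sets a
// string_type wins.
fn first_override(configs: &[Option<&'static str>]) -> Option<&'static str> {
    configs.iter().find_map(|c| *c)
}

fn main() {
    let configs = [None, Some("filename"), Some("utf8")];
    assert_eq!(first_override(&configs), Some("filename")); // first Some wins
}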
issue-44800.rs
use std::alloc::System; use std::collections::VecDeque; #[global_allocator] static ALLOCATOR: System = System; fn main()
{ let mut deque = VecDeque::with_capacity(32); deque.push_front(0); deque.reserve(31); deque.push_back(0); }
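// Hedged note, inferred from the file name only: this reads as a regression
// test for rust-lang/rust issue 44800 — push_front, reserve, then push_back
// on a VecDeque while System is installed as the #[global_allocator].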
type_traits.rs
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use common_arrow::arrow::compute::arithmetics::basic::NativeArithmetics; use num::NumCast; use serde::de::DeserializeOwned; use serde::Serialize; use crate::DFTryFrom; use crate::DataTypeImpl; use crate::DataValue; use crate::DateType; use crate::Scalar; use crate::TimestampType; use crate::TypeID; use crate::VariantType; use crate::VariantValue; pub trait PrimitiveType: NativeArithmetics + DFTryFrom<DataValue> + NumCast + PartialOrd + Into<DataValue> + Default + Serialize + DeserializeOwned + Scalar { type LargestType: PrimitiveType; const SIGN: bool; const FLOATING: bool; const SIZE: usize; } macro_rules! impl_primitive { ($ca:ident, $lg: ident, $sign: expr, $floating: expr, $size: expr) => { impl PrimitiveType for $ca { type LargestType = $lg; const SIGN: bool = $sign; const FLOATING: bool = $floating; const SIZE: usize = $size; } }; } impl_primitive!(u8, u64, false, false, 1); impl_primitive!(u16, u64, false, false, 2); impl_primitive!(u32, u64, false, false, 4); impl_primitive!(u64, u64, false, false, 8); impl_primitive!(i8, i64, true, false, 1); impl_primitive!(i16, i64, true, false, 2); impl_primitive!(i32, i64, true, false, 4); impl_primitive!(i64, i64, true, false, 8); impl_primitive!(f32, f64, true, true, 4); impl_primitive!(f64, f64, true, true, 8); pub trait IntegerType: PrimitiveType {} macro_rules! impl_integer { ($ca:ident, $native:ident) => { impl IntegerType for $ca {} }; } impl_integer!(u8, u8); impl_integer!(u16, u16); impl_integer!(u32, u32); impl_integer!(u64, u64); impl_integer!(i8, i8); impl_integer!(i16, i16); impl_integer!(i32, i32); impl_integer!(i64, i64); pub trait FloatType: PrimitiveType {} impl FloatType for f32 {} impl FloatType for f64 {} pub trait LogicalDateType: PrimitiveType { fn get_type_id() -> TypeID; } impl LogicalDateType for i32 { fn get_type_id() -> TypeID { TypeID::Date } } impl LogicalDateType for i64 { fn get_type_id() -> TypeID { TypeID::Timestamp } } pub trait ToDateType { fn to_date_type() -> DataTypeImpl; } impl ToDateType for i32 { fn to_date_type() -> DataTypeImpl { DateType::new_impl() } } impl ToDateType for i64 { fn to_date_type() -> DataTypeImpl { TimestampType::new_impl(6) } } pub trait ObjectType: std::fmt::Display + Clone + std::marker::Sync + std::marker::Send + DFTryFrom<DataValue> + Into<DataValue> + core::str::FromStr + DeserializeOwned + Serialize + Default + Scalar { fn data_type() -> DataTypeImpl; } impl ObjectType for VariantValue { fn data_type() -> DataTypeImpl { VariantType::new_impl()
} }
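// Hedged expansion note: impl_primitive!(u8, u64, false, false, 1) above
// expands, written out by hand, to
//
//     impl PrimitiveType for u8 {
//         type LargestType = u64;
//         const SIGN: bool = false;
//         const FLOATING: bool = false;
//         const SIZE: usize = 1;
//     }
//
// so every primitive carries its widening target and layout facts as
// associated items instead of runtime data.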
ArnoldRenderTest.py
########################################################################## # # Copyright (c) 2012, John Haddon. All rights reserved. # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import os import inspect import unittest import subprocess32 as subprocess import threading import arnold import imath import six
import IECore import IECoreImage import IECoreScene import IECoreArnold import Gaffer import GafferTest import GafferDispatch import GafferImage import GafferScene import GafferSceneTest import GafferOSL import GafferArnold import GafferArnoldTest class ArnoldRenderTest( GafferSceneTest.SceneTestCase ) : def setUp( self ) : GafferSceneTest.SceneTestCase.setUp( self ) self.__scriptFileName = self.temporaryDirectory() + "/test.gfr" def tearDown( self ) : GafferSceneTest.SceneTestCase.tearDown( self ) GafferScene.deregisterAdaptor( "Test" ) def testExecute( self ) : s = Gaffer.ScriptNode() s["plane"] = GafferScene.Plane() s["render"] = GafferArnold.ArnoldRender() s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode ) s["render"]["in"].setInput( s["plane"]["out"] ) s["expression"] = Gaffer.Expression() s["expression"].setExpression( "parent['render']['fileName'] = '" + self.temporaryDirectory() + "/test.%d.ass' % int( context['frame'] )" ) s["fileName"].setValue( self.__scriptFileName ) s.save() p = subprocess.Popen( "gaffer execute " + self.__scriptFileName + " -frames 1-3", shell=True, stderr = subprocess.PIPE, ) p.wait() self.assertFalse( p.returncode ) for i in range( 1, 4 ) : self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%d.ass" % i ) ) def testWaitForImage( self ) : s = Gaffer.ScriptNode() s["plane"] = GafferScene.Plane() s["outputs"] = GafferScene.Outputs() s["outputs"].addOutput( "beauty", IECoreScene.Output( self.temporaryDirectory() + "/test.tif", "tiff", "rgba", {} ) ) s["outputs"]["in"].setInput( s["plane"]["out"] ) s["render"] = GafferArnold.ArnoldRender() s["render"]["in"].setInput( s["outputs"]["out"] ) s["render"]["task"].execute() self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.tif" ) ) def testExecuteWithStringSubstitutions( self ) : s = Gaffer.ScriptNode() s["plane"] = GafferScene.Plane() s["render"] = GafferArnold.ArnoldRender() s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode ) s["render"]["in"].setInput( s["plane"]["out"] ) s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" ) s["fileName"].setValue( self.__scriptFileName ) s.save() p = subprocess.Popen( "gaffer execute " + self.__scriptFileName + " -frames 1-3", shell=True, stderr = subprocess.PIPE, ) p.wait() self.assertFalse( p.returncode ) for i in range( 1, 4 ) : self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%04d.ass" % i ) ) def testImageOutput( self ) : s = Gaffer.ScriptNode() s["plane"] = GafferScene.Plane() s["outputs"] = GafferScene.Outputs() s["outputs"].addOutput( "beauty", IECoreScene.Output( self.temporaryDirectory() + "/test.####.tif", "tiff", "rgba", {} ) ) s["outputs"]["in"].setInput( s["plane"]["out"] ) s["render"] = GafferArnold.ArnoldRender() s["render"]["in"].setInput( s["outputs"]["out"] ) c = Gaffer.Context() for i in range( 1, 4 ) : c.setFrame( i ) with c : s["render"]["task"].execute() for i in range( 1, 4 ) : self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.%04d.tif" % i ) ) def testTypeNamePrefixes( self ) : self.assertTypeNamesArePrefixed( GafferArnold ) self.assertTypeNamesArePrefixed( GafferArnoldTest ) def testDefaultNames( self ) : self.assertDefaultNamesAreCorrect( GafferArnold ) self.assertDefaultNamesAreCorrect( GafferArnoldTest ) def testNodesConstructWithDefaultValues( self ) : self.assertNodesConstructWithDefaultValues( GafferArnold ) self.assertNodesConstructWithDefaultValues( GafferArnoldTest ) def testDirectoryCreation( self ) : s = 
Gaffer.ScriptNode() s["variables"].addChild( Gaffer.NameValuePlug( "renderDirectory", self.temporaryDirectory() + "/renderTests" ) ) s["variables"].addChild( Gaffer.NameValuePlug( "assDirectory", self.temporaryDirectory() + "/assTests" ) ) s["plane"] = GafferScene.Plane() s["outputs"] = GafferScene.Outputs() s["outputs"]["in"].setInput( s["plane"]["out"] ) s["outputs"].addOutput( "beauty", IECoreScene.Output( "$renderDirectory/test.####.exr", "exr", "rgba", {} ) ) s["render"] = GafferArnold.ArnoldRender() s["render"]["in"].setInput( s["outputs"]["out"] ) s["render"]["fileName"].setValue( "$assDirectory/test.####.ass" ) s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode ) self.assertFalse( os.path.exists( self.temporaryDirectory() + "/renderTests" ) ) self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests" ) ) self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) ) s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" ) with s.context() : s["render"]["task"].execute() self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) ) self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) ) self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) ) # check it can cope with everything already existing with s.context() : s["render"]["task"].execute() self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) ) self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) ) self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) ) def testWedge( self ) : s = Gaffer.ScriptNode() s["sphere"] = GafferScene.Sphere() s["sphere"]["sets"].setValue( "${wedge:value}" ) s["filter"] = GafferScene.SetFilter() s["filter"]["setExpression"].setValue( "hidden" ) s["attributes"] = GafferScene.StandardAttributes() s["attributes"]["attributes"]["visibility"]["enabled"].setValue( True ) s["attributes"]["attributes"]["visibility"]["value"].setValue( False ) s["attributes"]["filter"].setInput( s["filter"]["out"] ) s["attributes"]["in"].setInput( s["sphere"]["out"] ) s["outputs"] = GafferScene.Outputs() s["outputs"].addOutput( "beauty", IECoreScene.Output( self.temporaryDirectory() + "/${wedge:value}.tif", "tiff", "rgba", { } ) ) s["outputs"]["in"].setInput( s["attributes"]["out"] ) s["render"] = GafferArnold.ArnoldRender() s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" ) s["render"]["in"].setInput( s["outputs"]["out"] ) s["wedge"] = GafferDispatch.Wedge() s["wedge"]["mode"].setValue( int( s["wedge"].Mode.StringList ) ) s["wedge"]["strings"].setValue( IECore.StringVectorData( [ "visible", "hidden" ] ) ) s["wedge"]["preTasks"][0].setInput( s["render"]["task"] ) s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" ) s.save() dispatcher = GafferDispatch.LocalDispatcher() dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() + "/testJobDirectory" ) dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame ) dispatcher["executeInBackground"].setValue( False ) dispatcher.dispatch( [ s["wedge"] ] ) hidden = GafferImage.ImageReader() hidden["fileName"].setValue( self.temporaryDirectory() + "/hidden.tif" ) visible = GafferImage.ImageReader() visible["fileName"].setValue( self.temporaryDirectory() + "/visible.tif" ) hiddenStats = GafferImage.ImageStats() hiddenStats["in"].setInput( hidden["out"] ) hiddenStats["area"].setValue( 
hiddenStats["in"]["dataWindow"].getValue() ) visibleStats = GafferImage.ImageStats() visibleStats["in"].setInput( visible["out"] ) visibleStats["area"].setValue( visibleStats["in"]["dataWindow"].getValue() ) self.assertLess( hiddenStats["average"].getValue()[0], 0.05 ) self.assertGreater( visibleStats["average"].getValue()[0], .27 ) @staticmethod def __m44f( m ) : return imath.M44f( *[ i for row in m.data for i in row ] ) def testTransformMotion( self ) : s = Gaffer.ScriptNode() s["plane"] = GafferScene.Plane() s["sphere"] = GafferScene.Sphere() s["group"] = GafferScene.Group() s["group"]["in"][0].setInput( s["plane"]["out"] ) s["group"]["in"][1].setInput( s["sphere"]["out"] ) s["expression"] = Gaffer.Expression() s["expression"].setExpression( inspect.cleandoc( """ parent["plane"]["transform"]["translate"]["x"] = context.getFrame() parent["sphere"]["transform"]["translate"]["y"] = context.getFrame() * 2 parent["group"]["transform"]["translate"]["z"] = context.getFrame() - 1 """ ) ) s["planeFilter"] = GafferScene.PathFilter() s["planeFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group/plane" ] ) ) s["attributes"] = GafferScene.StandardAttributes() s["attributes"]["in"].setInput( s["group"]["out"] ) s["attributes"]["filter"].setInput( s["planeFilter"]["out"] ) s["attributes"]["attributes"]["transformBlur"]["enabled"].setValue( True ) s["attributes"]["attributes"]["transformBlur"]["value"].setValue( False ) s["options"] = GafferScene.StandardOptions() s["options"]["in"].setInput( s["attributes"]["out"] ) s["options"]["options"]["shutter"]["enabled"].setValue( True ) s["options"]["options"]["transformBlur"]["enabled"].setValue( True ) s["render"] = GafferArnold.ArnoldRender() s["render"]["in"].setInput( s["options"]["out"] ) s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode ) s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" ) # No motion blur s["options"]["options"]["transformBlur"]["value"].setValue( False ) s["render"]["task"].execute() with IECoreArnold.UniverseBlock( writable = True ) : arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" ) camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" ) sphere = arnold.AiNodeLookUpByName( "/group/sphere" ) sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" ) sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" ) sphereMatrix = arnold.AiNodeGetMatrix( sphere, "matrix" ) plane = arnold.AiNodeLookUpByName( "/group/plane" ) planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" ) planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" ) planeMatrix = arnold.AiNodeGetMatrix( plane, "matrix" ) # Motion parameters should be left at default self.assertEqual( sphereMotionStart, 0 ) self.assertEqual( sphereMotionEnd, 1 ) self.assertEqual( planeMotionStart, 0 ) self.assertEqual( planeMotionEnd, 1 ) expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, 2, 0 ) ) expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, 0 ) ) self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) ) self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) ) self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 1 ) self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1 ) # Motion blur s["options"]["options"]["transformBlur"]["value"].setValue( True ) s["render"]["task"].execute() with IECoreArnold.UniverseBlock( writable = True ) : arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" ) 
camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" ) sphere = arnold.AiNodeLookUpByName( "/group/sphere" ) sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" ) sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" ) sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" ) plane = arnold.AiNodeLookUpByName( "/group/plane" ) planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" ) planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" ) planeMatrices = arnold.AiNodeGetArray( plane, "matrix" ) self.assertEqual( sphereMotionStart, 0.75 ) self.assertEqual( sphereMotionEnd, 1.25 ) self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 ) self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 ) self.assertEqual( planeMotionStart, 0.75 ) self.assertEqual( planeMotionEnd, 1.25 ) self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 ) self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 ) for i in range( 0, 2 ) : frame = 0.75 + 0.5 * i sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i ) expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) ) planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i ) expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) ) self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) ) self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) ) self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 ) self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1.25 ) # Motion blur on, but sampleMotion off s["options"]["options"]["sampleMotion"]["enabled"].setValue( True ) s["options"]["options"]["sampleMotion"]["value"].setValue( False ) s["render"]["task"].execute() with IECoreArnold.UniverseBlock( writable = True ) : arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" ) camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" ) sphere = arnold.AiNodeLookUpByName( "/group/sphere" ) sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" ) sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" ) sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" ) plane = arnold.AiNodeLookUpByName( "/group/plane" ) planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" ) planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" ) planeMatrices = arnold.AiNodeGetArray( plane, "matrix" ) self.assertEqual( sphereMotionStart, 0.75 ) self.assertEqual( sphereMotionEnd, 1.25 ) self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 ) self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 ) self.assertEqual( planeMotionStart, 0.75 ) self.assertEqual( planeMotionEnd, 1.25 ) self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 ) self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 ) for i in range( 0, 2 ) : frame = 0.75 + 0.5 * i sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i ) expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) ) planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i ) expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) ) self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) ) self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) ) self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 
0.75 ) self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 0.75 ) def testResolution( self ) : s = Gaffer.ScriptNode() s["camera"] = GafferScene.Camera() s["options"] = GafferScene.StandardOptions() s["options"]["in"].setInput( s["camera"]["out"] ) s["options"]["options"]["renderResolution"]["enabled"].setValue( True ) s["options"]["options"]["renderResolution"]["value"].setValue( imath.V2i( 200, 100 ) ) s["options"]["options"]["resolutionMultiplier"]["enabled"].setValue( True ) s["options"]["options"]["resolutionMultiplier"]["value"].setValue( 2 ) s["render"] = GafferArnold.ArnoldRender() s["render"]["in"].setInput( s["options"]["out"] ) s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode ) s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" ) # Default camera should have the right resolution. s["render"]["task"].execute() with IECoreArnold.UniverseBlock( writable = True ) : arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" ) options = arnold.AiUniverseGetOptions() self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 ) self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 ) # As should a camera picked from the scene. s["options"]["options"]["renderCamera"]["enabled"].setValue( True ) s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" ) s["render"]["task"].execute() with IECoreArnold.UniverseBlock( writable = True ) : arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" ) options = arnold.AiUniverseGetOptions() self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 ) self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 ) def testRenderRegion( self ) : s = Gaffer.ScriptNode() s["camera"] = GafferScene.Camera() s["options"] = GafferScene.StandardOptions() s["options"]["in"].setInput( s["camera"]["out"] ) s["options"]["options"]["renderCamera"]["enabled"].setValue( True ) s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" ) s["render"] = GafferArnold.ArnoldRender() s["render"]["in"].setInput( s["options"]["out"] ) s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode ) s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" ) # Default region s["render"]["task"].execute() with IECoreArnold.UniverseBlock( writable = True ) : arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" ) options = arnold.AiUniverseGetOptions() self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 ) self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 639 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 0 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 ) # Apply Crop Window s["options"]["options"]["renderCropWindow"]["enabled"].setValue( True ) s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f( imath.V2f( 0.25, 0.5 ), imath.V2f( 0.75, 1.0 ) ) ) s["render"]["task"].execute() with IECoreArnold.UniverseBlock( writable = True ) : arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" ) options = arnold.AiUniverseGetOptions() self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 ) self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 160 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 479 ) self.assertEqual( arnold.AiNodeGetInt( 
options, "region_min_y" ), 240 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 ) # Test Empty Crop Window s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f() ) s["render"]["task"].execute() with IECoreArnold.UniverseBlock( writable = True ) : arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" ) options = arnold.AiUniverseGetOptions() self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 ) self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 ) # Since Arnold doesn't support empty regions, we default to one pixel in the corner self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 0 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 479 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 ) # Apply Overscan s["options"]["options"]["renderCropWindow"]["enabled"].setValue( False ) s["options"]["options"]["overscan"]["enabled"].setValue( True ) s["options"]["options"]["overscan"]["value"].setValue( True ) s["options"]["options"]["overscanTop"]["enabled"].setValue( True ) s["options"]["options"]["overscanTop"]["value"].setValue( 0.1 ) s["options"]["options"]["overscanBottom"]["enabled"].setValue( True ) s["options"]["options"]["overscanBottom"]["value"].setValue( 0.2 ) s["options"]["options"]["overscanLeft"]["enabled"].setValue( True ) s["options"]["options"]["overscanLeft"]["value"].setValue( 0.3 ) s["options"]["options"]["overscanRight"]["enabled"].setValue( True ) s["options"]["options"]["overscanRight"]["value"].setValue( 0.4 ) s["render"]["task"].execute() with IECoreArnold.UniverseBlock( writable = True ) : arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" ) options = arnold.AiUniverseGetOptions() self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 ) self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), -192 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 640 + 255 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), -48 ) self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 480 + 95 ) def testMissingCameraRaises( self ) : s = Gaffer.ScriptNode() s["options"] = GafferScene.StandardOptions() s["options"]["options"]["renderCamera"]["enabled"].setValue( True ) s["options"]["options"]["renderCamera"]["value"].setValue( "/i/dont/exist" ) s["render"] = GafferArnold.ArnoldRender() s["render"]["in"].setInput( s["options"]["out"] ) s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode ) s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" ) # The requested camera doesn't exist - this should raise an exception. six.assertRaisesRegex( self, RuntimeError, "/i/dont/exist", s["render"]["task"].execute ) # And even the existence of a different camera shouldn't change that. 
		s["camera"] = GafferScene.Camera()
		s["options"]["in"].setInput( s["camera"]["out"] )

		six.assertRaisesRegex( self, RuntimeError, "/i/dont/exist", s["render"]["task"].execute )

	def testManyCameras( self ) :

		camera = GafferScene.Camera()

		duplicate = GafferScene.Duplicate()
		duplicate["in"].setInput( camera["out"] )
		duplicate["target"].setValue( "/camera" )
		duplicate["copies"].setValue( 1000 )

		render = GafferArnold.ArnoldRender()
		render["in"].setInput( duplicate["out"] )
		render["mode"].setValue( render.Mode.SceneDescriptionMode )
		render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

		render["task"].execute()

	def testTwoRenders( self ) :

		sphere = GafferScene.Sphere()

		duplicate = GafferScene.Duplicate()
		duplicate["in"].setInput( sphere["out"] )
		duplicate["target"].setValue( "/sphere" )
		duplicate["copies"].setValue( 10000 )

		render = GafferArnold.ArnoldRender()
		render["in"].setInput( duplicate["out"] )
		render["mode"].setValue( render.Mode.SceneDescriptionMode )
		render["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )

		errors = []
		def executeFrame( frame ) :

			with Gaffer.Context() as c :
				c.setFrame( frame )
				try :
					render["task"].execute()
				except Exception as e :
					errors.append( str( e ) )

		threads = []
		for i in range( 0, 2 ) :
			t = threading.Thread( target = executeFrame, args = ( i, ) )
			t.start()
			threads.append( t )

		for t in threads :
			t.join()

		self.assertEqual( len( errors ), 1 )
		self.assertTrue( "Arnold is already in use" in errors[0] )

	def testTraceSets( self ) :

		sphere = GafferScene.Sphere()
		group = GafferScene.Group()
		group["in"][0].setInput( sphere["out"] )
		group["in"][1].setInput( sphere["out"] )

		set1 = GafferScene.Set()
		set1["name"].setValue( "render:firstSphere" )
		set1["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
		set1["in"].setInput( group["out"] )

		set2 = GafferScene.Set()
		set2["name"].setValue( "render:secondSphere" )
		set2["paths"].setValue( IECore.StringVectorData( [ "/group/sphere1" ] ) )
		set2["in"].setInput( set1["out"] )

		set3 = GafferScene.Set()
		set3["name"].setValue( "render:group" )
		set3["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
		set3["in"].setInput( set2["out"] )

		set4 = GafferScene.Set()
		set4["name"].setValue( "render:bothSpheres" )
		set4["paths"].setValue( IECore.StringVectorData( [ "/group/sphere", "/group/sphere1" ] ) )
		set4["in"].setInput( set3["out"] )

		render = GafferArnold.ArnoldRender()
		render["in"].setInput( set4["out"] )
		render["mode"].setValue( render.Mode.SceneDescriptionMode )
		render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

		render["task"].execute()

		with IECoreArnold.UniverseBlock( writable = True ) :

			arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

			firstSphere = arnold.AiNodeLookUpByName( "/group/sphere" )
			secondSphere = arnold.AiNodeLookUpByName( "/group/sphere1" )

			self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( firstSphere, "trace_sets" ) ), { "firstSphere", "group", "bothSpheres" } )
			self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( secondSphere, "trace_sets" ) ), { "secondSphere", "group", "bothSpheres" } )

	def testSetsNeedContextEntry( self ) :

		script = Gaffer.ScriptNode()
		script["light"] = GafferArnold.ArnoldLight()
		script["light"].loadShader( "point_light" )

		script["expression"] = Gaffer.Expression()
		script["expression"].setExpression(
			"""parent["light"]["name"] = context["lightName"]"""
		)

		script["render"] = GafferArnold.ArnoldRender()
		script["render"]["in"].setInput( script["light"]["out"] )
		script["render"]["mode"].setValue( script["render"].Mode.SceneDescriptionMode )
		script["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

		for i in range( 0, 100 ) :

			with Gaffer.Context() as context :
				context["lightName"] = "light%d" % i
				script["render"]["task"].execute()

	def testFrameAndAASeed( self ) :

		options = GafferArnold.ArnoldOptions()

		render = GafferArnold.ArnoldRender()
		render["in"].setInput( options["out"] )
		render["mode"].setValue( render.Mode.SceneDescriptionMode )
		render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

		for frame in ( 1, 2, 2.8, 3.2 ) :
			for seed in ( None, 3, 4 ) :

				with Gaffer.Context() as c :

					c.setFrame( frame )

					options["options"]["aaSeed"]["enabled"].setValue( seed is not None )
					options["options"]["aaSeed"]["value"].setValue( seed or 1 )

					render["task"].execute()

					with IECoreArnold.UniverseBlock( writable = True ) :

						arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

						self.assertEqual(
							arnold.AiNodeGetInt( arnold.AiUniverseGetOptions(), "AA_seed" ),
							seed or round( frame )
						)

	def testRendererContextVariable( self ) :

		sphere = GafferScene.Sphere()
		sphere["name"].setValue( "sphere${scene:renderer}" )

		render = GafferArnold.ArnoldRender()
		render["in"].setInput( sphere["out"] )
		render["mode"].setValue( render.Mode.SceneDescriptionMode )
		render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

		render["task"].execute()

		with IECoreArnold.UniverseBlock( writable = True ) :

			arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

			self.assertTrue( arnold.AiNodeLookUpByName( "/sphereArnold" ) is not None )

	def testAdaptors( self ) :

		sphere = GafferScene.Sphere()

		def a() :

			result = GafferArnold.ArnoldAttributes()
			result["attributes"]["matte"]["enabled"].setValue( True )
			result["attributes"]["matte"]["value"].setValue( True )

			return result

		GafferScene.registerAdaptor( "Test", a )

		sphere = GafferScene.Sphere()

		render = GafferArnold.ArnoldRender()
		render["in"].setInput( sphere["out"] )
		render["mode"].setValue( render.Mode.SceneDescriptionMode )
		render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

		render["task"].execute()

		with IECoreArnold.UniverseBlock( writable = True ) :

			arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

			node = arnold.AiNodeLookUpByName( "/sphere" )

			self.assertEqual( arnold.AiNodeGetBool( node, "matte" ), True )

	def testLightAndShadowLinking( self ) :

		sphere1 = GafferScene.Sphere()
		sphere2 = GafferScene.Sphere()

		attributes = GafferScene.StandardAttributes()
		arnoldAttributes = GafferArnold.ArnoldAttributes()

		light1 = GafferArnold.ArnoldLight()
		light1.loadShader( "point_light" )

		light2 = GafferArnold.ArnoldLight()
		light2.loadShader( "point_light" )

		group = GafferScene.Group()
		render = GafferArnold.ArnoldRender()

		attributes["in"].setInput( sphere1["out"] )
		arnoldAttributes["in"].setInput( attributes["out"] )
		group["in"][0].setInput( arnoldAttributes["out"] )
		group["in"][1].setInput( light1["out"] )
		group["in"][2].setInput( light2["out"] )
		group["in"][3].setInput( sphere2["out"] )
		render["in"].setInput( group["out"] )

		# Illumination
		attributes["attributes"]["linkedLights"]["enabled"].setValue( True )
		attributes["attributes"]["linkedLights"]["value"].setValue( "/group/light" )

		# Shadows
		arnoldAttributes["attributes"]["shadowGroup"]["enabled"].setValue( True )
		arnoldAttributes["attributes"]["shadowGroup"]["value"].setValue( "/group/light1" )

		render["mode"].setValue( render.Mode.SceneDescriptionMode )
		render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
		render["task"].execute()

		with IECoreArnold.UniverseBlock( writable = True ) :

			arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

			# the first sphere had linked lights
			sphere = arnold.AiNodeLookUpByName( "/group/sphere" )

			# check illumination
			self.assertTrue( arnold.AiNodeGetBool( sphere, "use_light_group" ) )
			lights = arnold.AiNodeGetArray( sphere, "light_group" )
			self.assertEqual( arnold.AiArrayGetNumElements( lights ), 1 )
			self.assertEqual(
				arnold.AiNodeGetName( arnold.AiArrayGetPtr( lights, 0 ) ),
				"light:/group/light"
			)

			# check shadows
			self.assertTrue( arnold.AiNodeGetBool( sphere, "use_shadow_group" ) )
			shadows = arnold.AiNodeGetArray( sphere, "shadow_group" )
			self.assertEqual( arnold.AiArrayGetNumElements( shadows ), 1 )
			self.assertEqual(
				arnold.AiNodeGetName( arnold.AiArrayGetPtr( shadows, 0 ) ),
				"light:/group/light1"
			)

			# the second sphere does not have any light linking enabled
			sphere1 = arnold.AiNodeLookUpByName( "/group/sphere1" )

			# check illumination
			self.assertFalse( arnold.AiNodeGetBool( sphere1, "use_light_group" ) )
			lights = arnold.AiNodeGetArray( sphere1, "light_group" )
			self.assertEqual( arnold.AiArrayGetNumElements( lights ), 0 )

			# check shadows
			self.assertFalse( arnold.AiNodeGetBool( sphere1, "use_shadow_group" ) )
			shadows = arnold.AiNodeGetArray( sphere1, "shadow_group" )
			self.assertEqual( arnold.AiArrayGetNumElements( shadows ), 0 )

	def testNoLinkedLightsOnLights( self ) :

		sphere = GafferScene.Sphere()

		meshLightShader = GafferArnold.ArnoldShader()
		meshLightShader.loadShader( "flat" )

		meshLightFilter = GafferScene.PathFilter()
		meshLightFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )

		meshLight = GafferArnold.ArnoldMeshLight()
		meshLight["in"].setInput( sphere["out"] )
		meshLight["filter"].setInput( meshLightFilter["out"] )
		meshLight["parameters"]["color"].setInput( meshLightShader["out"] )

		light1 = GafferArnold.ArnoldLight()
		light1.loadShader( "point_light" )

		light2 = GafferArnold.ArnoldLight()
		light2.loadShader( "point_light" )

		# Trigger light linking by unlinking a light
		light2["defaultLight"].setValue( False )

		group = GafferScene.Group()

		group["in"][0].setInput( meshLight["out"] )
		group["in"][1].setInput( light1["out"] )
		group["in"][2].setInput( light2["out"] )

		render = GafferArnold.ArnoldRender()
		render["in"].setInput( group["out"] )
		render["mode"].setValue( render.Mode.SceneDescriptionMode )
		render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

		render["task"].execute()

		with IECoreArnold.UniverseBlock( writable = True ) :

			arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

			sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
			self.assertIsNotNone( sphere )

			self.assertEqual( arnold.AiArrayGetNumElements( arnold.AiNodeGetArray( sphere, "light_group" ) ), 0 )
			self.assertFalse( arnold.AiNodeGetBool( sphere, "use_light_group" ) )

	def testLightFilters( self ) :

		s = Gaffer.ScriptNode()

		s["lightFilter"] = GafferArnold.ArnoldLightFilter()
		s["lightFilter"].loadShader( "light_blocker" )

		s["attributes"] = GafferScene.StandardAttributes()
		s["attributes"]["in"].setInput( s["lightFilter"]["out"] )
		s["attributes"]["attributes"]["filteredLights"]["enabled"].setValue( True )
		s["attributes"]["attributes"]["filteredLights"]["value"].setValue( "defaultLights" )

		s["light"] = GafferArnold.ArnoldLight()
		s["light"].loadShader( "point_light" )

		s["gobo"] = GafferArnold.ArnoldShader()
		s["gobo"].loadShader( "gobo" )

		s["assignment"] = GafferScene.ShaderAssignment()
		s["assignment"]["in"].setInput( s["light"]["out"] )
		s["assignment"]["shader"].setInput( s["gobo"]["out"] )
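
		# The gobo is assigned to the light as a regular shader, while the
		# light blocker reaches the light via the "filteredLights" attribute;
		# the assertions below check that both end up as entries in the
		# light's "filters" array in the exported scene.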
		s["group"] = GafferScene.Group()
		s["group"]["in"][0].setInput( s["attributes"]["out"] )
		s["group"]["in"][1].setInput( s["assignment"]["out"] )

		s["render"] = GafferArnold.ArnoldRender()
		s["render"]["in"].setInput( s["group"]["out"] )
		s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
		s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

		s["render"]["task"].execute()

		with IECoreArnold.UniverseBlock( writable = True ) :

			arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

			light = arnold.AiNodeLookUpByName( "light:/group/light" )
			linkedFilters = arnold.AiNodeGetArray( light, "filters" )
			numFilters = arnold.AiArrayGetNumElements( linkedFilters.contents )

			self.assertEqual( numFilters, 2 )

			linkedFilter = arnold.cast(arnold.AiArrayGetPtr(linkedFilters, 0), arnold.POINTER(arnold.AtNode))
			linkedGobo = arnold.cast(arnold.AiArrayGetPtr(linkedFilters, 1), arnold.POINTER(arnold.AtNode))

			self.assertEqual( arnold.AiNodeGetName( linkedFilter ), "lightFilter:/group/lightFilter" )
			self.assertEqual( arnold.AiNodeEntryGetName( arnold.AiNodeGetNodeEntry( linkedFilter ) ), "light_blocker" )
			self.assertEqual( arnold.AiNodeEntryGetName( arnold.AiNodeGetNodeEntry( linkedGobo ) ), "gobo" )

	@GafferTest.TestRunner.PerformanceTestMethod( repeat = 1 )
	def testLightFiltersMany( self ) :

		numLights = 10000
		numLightFilters = 10000

		s = Gaffer.ScriptNode()

		s["lightFilter"] = GafferArnold.ArnoldLightFilter()
		s["lightFilter"].loadShader( "light_blocker" )
		s["lightFilter"]["filteredLights"].setValue( "defaultLights" )

		s["planeFilters"] = GafferScene.Plane( "Plane" )
		s["planeFilters"]["divisions"].setValue( imath.V2i( 1, numLightFilters / 2 - 1 ) )

		s["instancerFilters"] = GafferScene.Instancer( "Instancer" )
		s["instancerFilters"]["in"].setInput( s["planeFilters"]["out"] )
		s["instancerFilters"]["instances"].setInput( s["lightFilter"]["out"] )
		s["instancerFilters"]["parent"].setValue( "/plane" )

		s["light"] = GafferArnold.ArnoldLight()
		s["light"].loadShader( "point_light" )

		s["planeLights"] = GafferScene.Plane( "Plane" )
		s["planeLights"]["divisions"].setValue( imath.V2i( 1, numLights / 2 - 1 ) )

		s["instancerLights"] = GafferScene.Instancer( "Instancer" )
		s["instancerLights"]["in"].setInput( s["planeLights"]["out"] )
		s["instancerLights"]["instances"].setInput( s["light"]["out"] )
		s["instancerLights"]["parent"].setValue( "/plane" )

		s["group"] = GafferScene.Group( "Group" )
		s["group"]["in"][0].setInput( s["instancerFilters"]["out"] )
		s["group"]["in"][1].setInput( s["instancerLights"]["out"] )

		s["render"] = GafferArnold.ArnoldRender()
		s["render"]["in"].setInput( s["group"]["out"] )

		with Gaffer.Context() as c :
			c["scene:render:sceneTranslationOnly"] = IECore.BoolData( True )
			s["render"]["task"].execute()

	def testAbortRaises( self ) :

		s = Gaffer.ScriptNode()

		s["plane"] = GafferScene.Plane()
		s["plane"]["transform"]["translate"]["z"].setValue( -10 )

		s["shader"] = GafferArnold.ArnoldShader()
		s["shader"].loadShader( "image" )
		# Missing texture should cause render to abort
		s["shader"]["parameters"]["filename"].setValue( "iDontExist" )

		s["filter"] = GafferScene.PathFilter()
		s["filter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )

		s["shaderAssignment"] = GafferScene.ShaderAssignment()
		s["shaderAssignment"]["in"].setInput( s["plane"]["out"] )
		s["shaderAssignment"]["filter"].setInput( s["filter"]["out"] )
		s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )

		s["outputs"] = GafferScene.Outputs()
		s["outputs"].addOutput(
			"beauty",
			IECoreScene.Output(
				self.temporaryDirectory() + "/test.tif",
				"tiff",
				"rgba",
				{}
			)
		)
		s["outputs"]["in"].setInput( s["shaderAssignment"]["out"] )

		s["render"] = GafferArnold.ArnoldRender()
		s["render"]["in"].setInput( s["outputs"]["out"] )

		six.assertRaisesRegex( self, RuntimeError, "Render aborted", s["render"]["task"].execute )

	def testOSLShaders( self ) :

		swizzle = GafferOSL.OSLShader()
		swizzle.loadShader( "MaterialX/mx_swizzle_color_float" )
		swizzle["parameters"]["in"].setValue( imath.Color3f( 0, 0, 1 ) )
		swizzle["parameters"]["channels"].setValue( "b" )

		pack = GafferOSL.OSLShader()
		pack.loadShader( "MaterialX/mx_pack_color" )
		pack["parameters"]["in1"].setInput( swizzle["out"]["out"] )

		ball = GafferArnold.ArnoldShaderBall()
		ball["shader"].setInput( pack["out"] )

		catalogue = GafferImage.Catalogue()

		outputs = GafferScene.Outputs()
		outputs.addOutput(
			"beauty",
			IECoreScene.Output(
				"test",
				"ieDisplay",
				"rgba",
				{
					"driverType" : "ClientDisplayDriver",
					"displayHost" : "localhost",
					"displayPort" : str( catalogue.displayDriverServer().portNumber() ),
					"remoteDisplayType" : "GafferImage::GafferDisplayDriver",
				}
			)
		)
		outputs["in"].setInput( ball["out"] )

		render = GafferArnold.ArnoldRender()
		render["in"].setInput( outputs["out"] )

		with GafferTest.ParallelAlgoTest.UIThreadCallHandler() as handler :
			render["task"].execute()
			handler.waitFor( 0.1 ) #Just need to let the catalogue update

		self.assertEqual( self.__color4fAtUV( catalogue, imath.V2f( 0.5 ) ), imath.Color4f( 1, 0, 0, 1 ) )

	def testDefaultLightsMistakesDontForceLinking( self ) :

		light = GafferArnold.ArnoldLight()
		light.loadShader( "point_light" )

		sphere = GafferScene.Sphere()

		# It doesn't make sense to add a non-light to the "defaultLights"
		# set like this, but in the event of user error, we don't want to
		# emit light links unnecessarily.
		sphereSet = GafferScene.Set()
		sphereSet["in"].setInput( sphere["out"] )
		sphereSet["name"].setValue( "defaultLights" )
		sphereSet["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )

		group = GafferScene.Group()

		group["in"][0].setInput( light["out"] )
		group["in"][1].setInput( sphereSet["out"] )

		render = GafferArnold.ArnoldRender()
		render["in"].setInput( group["out"] )
		render["mode"].setValue( render.Mode.SceneDescriptionMode )
		render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

		render["task"].execute()

		with IECoreArnold.UniverseBlock( writable = True ) :

			arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

			sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
			self.assertIsNotNone( sphere )

			self.assertEqual( arnold.AiArrayGetNumElements( arnold.AiNodeGetArray( sphere, "light_group" ) ), 0 )
			self.assertFalse( arnold.AiNodeGetBool( sphere, "use_light_group" ) )

	def __color4fAtUV( self, image, uv ) :

		sampler = GafferImage.ImageSampler()
		sampler["image"].setInput( image["out"] )
		dw = image['out']["format"].getValue().getDisplayWindow().size()
		sampler["pixel"].setValue( uv * imath.V2f( dw.x, dw.y ) )

		return sampler["color"].getValue()

	def __arrayToSet( self, a ) :

		result = set()
		for i in range( 0, arnold.AiArrayGetNumElements( a.contents ) ) :
			if arnold.AiArrayGetType( a.contents ) == arnold.AI_TYPE_STRING :
				result.add( arnold.AiArrayGetStr( a, i ) )
			else :
				raise TypeError

		return result

	def testPerformanceMonitorDoesntCrash( self ) :

		options = GafferScene.StandardOptions()
		options["options"]["performanceMonitor"]["value"].setValue( True )
		options["options"]["performanceMonitor"]["enabled"].setValue( True )

		render = GafferArnold.ArnoldRender()
		render["in"].setInput( options["out"] )
		render["mode"].setValue( render.Mode.SceneDescriptionMode )
		render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
		render["task"].execute()

	def testShaderSubstitutions( self ) :

		s = Gaffer.ScriptNode()

		s["plane"] = GafferScene.Plane()

		s["planeAttrs"] = GafferScene.CustomAttributes()
		s["planeAttrs"]["in"].setInput( s["plane"]["out"] )
		s["planeAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "A", Gaffer.StringPlug( "value", defaultValue = 'bar' ) ) )
		s["planeAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'foo' ) ) )

		s["cube"] = GafferScene.Cube()

		s["cubeAttrs"] = GafferScene.CustomAttributes()
		s["cubeAttrs"]["in"].setInput( s["cube"]["out"] )
		s["cubeAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'override' ) ) )

		s["parent"] = GafferScene.Parent()
		s["parent"]["in"].setInput( s["planeAttrs"]["out"] )
		s["parent"]["children"][0].setInput( s["cubeAttrs"]["out"] )
		s["parent"]["parent"].setValue( "/plane" )

		s["shader"] = GafferArnold.ArnoldShader()
		s["shader"].loadShader( "image" )
		s["shader"]["parameters"]["filename"].setValue( "<attr:A>/path/<attr:B>.tx" )

		s["filter"] = GafferScene.PathFilter()
		s["filter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )

		s["shaderAssignment"] = GafferScene.ShaderAssignment()
		s["shaderAssignment"]["in"].setInput( s["parent"]["out"] )
		s["shaderAssignment"]["filter"].setInput( s["filter"]["out"] )
		s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )

		s["light"] = GafferArnold.ArnoldLight()
		s["light"].loadShader( "photometric_light" )
		s["light"]["parameters"]["filename"].setValue( "/path/<attr:A>.ies" )

		s["goboTexture"] = GafferArnold.ArnoldShader()
		s["goboTexture"].loadShader( "image" )
		s["goboTexture"]["parameters"]["filename"].setValue( "<attr:B>/gobo.tx" )

		s["gobo"] = GafferArnold.ArnoldShader()
		s["gobo"].loadShader( "gobo" )
		s["gobo"]["parameters"]["slidemap"].setInput( s["goboTexture"]["out"] )

		s["goboAssign"] = GafferScene.ShaderAssignment()
		s["goboAssign"]["in"].setInput( s["light"]["out"] )
		s["goboAssign"]["shader"].setInput( s["gobo"]["out"] )

		s["lightBlocker"] = GafferArnold.ArnoldLightFilter()
		s["lightBlocker"].loadShader( "light_blocker" )
		s["lightBlocker"]["parameters"]["geometry_type"].setValue( "<attr:geometryType>" )

		s["lightGroup"] = GafferScene.Group()
		s["lightGroup"]["name"].setValue( "lightGroup" )
		s["lightGroup"]["in"][0].setInput( s["goboAssign"]["out"] )
		s["lightGroup"]["in"][1].setInput( s["lightBlocker"]["out"] )

		s["parent2"] = GafferScene.Parent()
		s["parent2"]["in"].setInput( s["shaderAssignment"]["out"] )
		s["parent2"]["children"][0].setInput( s["lightGroup"]["out"] )
		s["parent2"]["parent"].setValue( "/" )

		s["globalAttrs"] = GafferScene.CustomAttributes()
		s["globalAttrs"]["in"].setInput( s["parent2"]["out"] )
		s["globalAttrs"]["global"].setValue( True )
		s["globalAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "A", Gaffer.StringPlug( "value", defaultValue = 'default1' ) ) )
		s["globalAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'default2' ) ) )
		s["globalAttrs"]["attributes"].addChild( Gaffer.NameValuePlug( "geometryType", Gaffer.StringPlug( "value", defaultValue = 'cylinder' ) ) )

		s["render"] = GafferArnold.ArnoldRender()
		s["render"]["in"].setInput( s["globalAttrs"]["out"] )
		s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
		s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )

		s["render"]["task"].execute()

		with IECoreArnold.UniverseBlock( writable = True ) :

			arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )

			plane = arnold.AiNodeLookUpByName( "/plane" )
			shader = arnold.AiNodeGetPtr( plane, "shader" )
			self.assertEqual( arnold.AiNodeGetStr( shader, "filename" ), "bar/path/foo.tx" )

			cube = arnold.AiNodeLookUpByName( "/plane/cube" )
			shader2 = arnold.AiNodeGetPtr( cube, "shader" )
			self.assertEqual( arnold.AiNodeGetStr( shader2, "filename" ), "bar/path/override.tx" )

			light = arnold.AiNodeLookUpByName( "light:/lightGroup/light" )
			self.assertEqual( arnold.AiNodeGetStr( light, "filename" ), "/path/default1.ies" )

			gobo = arnold.AiNodeGetPtr( light, "filters" )
			goboTex = arnold.AiNodeGetLink( gobo, "slidemap" )
			self.assertEqual( arnold.AiNodeGetStr( goboTex, "filename" ), "default2/gobo.tx" )

			lightFilter = arnold.AiNodeLookUpByName( "lightFilter:/lightGroup/lightFilter" )
			self.assertEqual( arnold.AiNodeGetStr( lightFilter, "geometry_type" ), "cylinder" )

if __name__ == "__main__":
	unittest.main()
gen_type_activitystreams_leave.go
// Code generated by astool. DO NOT EDIT.

package typeleave

import (
	"fmt"
	vocab "github.com/go-fed/activity/streams/vocab"
	"strings"
)

// Indicates that the actor has left the object. The target and origin typically
// have no meaning.
//
// Example 20 (https://www.w3.org/TR/activitystreams-vocabulary/#ex18-jsonld):
//   {
//     "actor": {
//       "name": "Sally",
//       "type": "Person"
//     },
//     "object": {
//       "name": "Work",
//       "type": "Place"
//     },
//     "summary": "Sally left work",
//     "type": "Leave"
//   }
//
// Example 21 (https://www.w3.org/TR/activitystreams-vocabulary/#ex19-jsonld):
//   {
//     "actor": {
//       "name": "Sally",
//       "type": "Person"
//     },
//     "object": {
//       "name": "A Simple Group",
//       "type": "Group"
//     },
//     "summary": "Sally left a group",
//     "type": "Leave"
//   }
type ActivityStreamsLeave struct {
	ActivityStreamsActor        vocab.ActivityStreamsActorProperty
	ActivityStreamsAltitude     vocab.ActivityStreamsAltitudeProperty
	ActivityStreamsAttachment   vocab.ActivityStreamsAttachmentProperty
	ActivityStreamsAttributedTo vocab.ActivityStreamsAttributedToProperty
	ActivityStreamsAudience     vocab.ActivityStreamsAudienceProperty
	ActivityStreamsBcc          vocab.ActivityStreamsBccProperty
	ActivityStreamsBto          vocab.ActivityStreamsBtoProperty
	ActivityStreamsCc           vocab.ActivityStreamsCcProperty
	ActivityStreamsContent      vocab.ActivityStreamsContentProperty
	ActivityStreamsContext      vocab.ActivityStreamsContextProperty
	ActivityStreamsDuration     vocab.ActivityStreamsDurationProperty
	ActivityStreamsEndTime      vocab.ActivityStreamsEndTimeProperty
	ActivityStreamsGenerator    vocab.ActivityStreamsGeneratorProperty
	ActivityStreamsIcon         vocab.ActivityStreamsIconProperty
	JSONLDId                    vocab.JSONLDIdProperty
	ActivityStreamsImage        vocab.ActivityStreamsImageProperty
	ActivityStreamsInReplyTo    vocab.ActivityStreamsInReplyToProperty
	ActivityStreamsInstrument   vocab.ActivityStreamsInstrumentProperty
	ActivityStreamsLikes        vocab.ActivityStreamsLikesProperty
	ActivityStreamsLocation     vocab.ActivityStreamsLocationProperty
	ActivityStreamsMediaType    vocab.ActivityStreamsMediaTypeProperty
	ActivityStreamsName         vocab.ActivityStreamsNameProperty
	ActivityStreamsObject       vocab.ActivityStreamsObjectProperty
	ActivityStreamsOrigin       vocab.ActivityStreamsOriginProperty
	ActivityStreamsPreview      vocab.ActivityStreamsPreviewProperty
	ActivityStreamsPublished    vocab.ActivityStreamsPublishedProperty
	ActivityStreamsReplies      vocab.ActivityStreamsRepliesProperty
	ActivityStreamsResult       vocab.ActivityStreamsResultProperty
	ActivityStreamsSensitive    vocab.ActivityStreamsSensitiveProperty
	ActivityStreamsShares       vocab.ActivityStreamsSharesProperty
	ActivityStreamsSource       vocab.ActivityStreamsSourceProperty
	ActivityStreamsStartTime    vocab.ActivityStreamsStartTimeProperty
	ActivityStreamsSummary      vocab.ActivityStreamsSummaryProperty
	ActivityStreamsTag          vocab.ActivityStreamsTagProperty
	ActivityStreamsTarget       vocab.ActivityStreamsTargetProperty
	ActivityStreamsTo           vocab.ActivityStreamsToProperty
	JSONLDType                  vocab.JSONLDTypeProperty
	ActivityStreamsUpdated      vocab.ActivityStreamsUpdatedProperty
	ActivityStreamsUrl          vocab.ActivityStreamsUrlProperty
	alias                       string
	unknown                     map[string]interface{}
}

// ActivityStreamsLeaveExtends returns true if the Leave type extends from the
// other type.
func ActivityStreamsLeaveExtends(other vocab.Type) bool {
	extensions := []string{"Activity", "Object"}
	for _, ext := range extensions {
		if ext == other.GetTypeName() {
			return true
		}
	}
	return false
}

// DeserializeLeave creates a Leave from a map representation that has been
// unmarshalled from a text or binary format.
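//
// A minimal usage sketch (illustrative only, not part of the generated
// documentation; callers normally reach this through the streams package's
// resolver, and the package-level property manager must be initialised, as
// it is in normal use):
//
//	var m map[string]interface{}
//	_ = json.Unmarshal(data, &m) // "data" is hypothetical JSON input
//	leave, err := DeserializeLeave(m, map[string]string{})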
func DeserializeLeave(m map[string]interface{}, aliasMap map[string]string) (*ActivityStreamsLeave, error) {
	alias := ""
	aliasPrefix := ""
	if a, ok := aliasMap["https://www.w3.org/ns/activitystreams"]; ok {
		alias = a
		aliasPrefix = a + ":"
	}
	this := &ActivityStreamsLeave{
		alias:   alias,
		unknown: make(map[string]interface{}),
	}
	if typeValue, ok := m["type"]; !ok {
		return nil, fmt.Errorf("no \"type\" property in map")
	} else if typeString, ok := typeValue.(string); ok {
		typeName := strings.TrimPrefix(typeString, aliasPrefix)
		if typeName != "Leave" {
			return nil, fmt.Errorf("\"type\" property is not of %q type: %s", "Leave", typeName)
		}
		// Fall through, success in finding a proper Type
	} else if arrType, ok := typeValue.([]interface{}); ok {
		found := false
		for _, elemVal := range arrType {
			if typeString, ok := elemVal.(string); ok && strings.TrimPrefix(typeString, aliasPrefix) == "Leave" {
				found = true
				break
			}
		}
		if !found {
			return nil, fmt.Errorf("could not find a \"type\" property of value %q", "Leave")
		}
		// Fall through, success in finding a proper Type
	} else {
		return nil, fmt.Errorf("\"type\" property is unrecognized type: %T", typeValue)
	}
	// Begin: Known property deserialization
	if p, err := mgr.DeserializeActorPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsActor = p
	}
	if p, err := mgr.DeserializeAltitudePropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsAltitude = p
	}
	if p, err := mgr.DeserializeAttachmentPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsAttachment = p
	}
	if p, err := mgr.DeserializeAttributedToPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsAttributedTo = p
	}
	if p, err := mgr.DeserializeAudiencePropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsAudience = p
	}
	if p, err := mgr.DeserializeBccPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsBcc = p
	}
	if p, err := mgr.DeserializeBtoPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsBto = p
	}
	if p, err := mgr.DeserializeCcPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsCc = p
	}
	if p, err := mgr.DeserializeContentPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsContent = p
	}
	if p, err := mgr.DeserializeContextPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsContext = p
	}
	if p, err := mgr.DeserializeDurationPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsDuration = p
	}
	if p, err := mgr.DeserializeEndTimePropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsEndTime = p
	}
	if p, err := mgr.DeserializeGeneratorPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsGenerator = p
	}
	if p, err := mgr.DeserializeIconPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsIcon = p
	}
	if p, err := mgr.DeserializeIdPropertyJSONLD()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.JSONLDId = p
	}
	if p, err := mgr.DeserializeImagePropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsImage = p
	}
	if p, err := mgr.DeserializeInReplyToPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsInReplyTo = p
	}
	if p, err := mgr.DeserializeInstrumentPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsInstrument = p
	}
	if p, err := mgr.DeserializeLikesPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsLikes = p
	}
	if p, err := mgr.DeserializeLocationPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsLocation = p
	}
	if p, err := mgr.DeserializeMediaTypePropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsMediaType = p
	}
	if p, err := mgr.DeserializeNamePropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsName = p
	}
	if p, err := mgr.DeserializeObjectPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsObject = p
	}
	if p, err := mgr.DeserializeOriginPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsOrigin = p
	}
	if p, err := mgr.DeserializePreviewPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsPreview = p
	}
	if p, err := mgr.DeserializePublishedPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsPublished = p
	}
	if p, err := mgr.DeserializeRepliesPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsReplies = p
	}
	if p, err := mgr.DeserializeResultPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsResult = p
	}
	if p, err := mgr.DeserializeSensitivePropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsSensitive = p
	}
	if p, err := mgr.DeserializeSharesPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsShares = p
	}
	if p, err := mgr.DeserializeSourcePropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsSource = p
	}
	if p, err := mgr.DeserializeStartTimePropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsStartTime = p
	}
	if p, err := mgr.DeserializeSummaryPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsSummary = p
	}
	if p, err := mgr.DeserializeTagPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsTag = p
	}
	if p, err := mgr.DeserializeTargetPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsTarget = p
	}
	if p, err := mgr.DeserializeToPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsTo = p
	}
	if p, err := mgr.DeserializeTypePropertyJSONLD()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.JSONLDType = p
	}
	if p, err := mgr.DeserializeUpdatedPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsUpdated = p
	}
	if p, err := mgr.DeserializeUrlPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsUrl = p
	}
	// End: Known property deserialization

	// Begin: Unknown deserialization
	for k, v := range m {
		// Begin: Code that ensures a property name is unknown
		if k == "actor" {
			continue
		} else if k == "altitude" {
			continue
		} else if k == "attachment" {
			continue
		} else if k == "attributedTo" {
			continue
		} else if k == "audience" {
			continue
		} else if k == "bcc" {
			continue
		} else if k == "bto" {
			continue
		} else if k == "cc" {
			continue
		} else if k == "content" {
			continue
		} else if k == "contentMap" {
			continue
		} else if k == "context" {
			continue
		} else if k == "duration" {
			continue
		} else if k == "endTime" {
			continue
		} else if k == "generator" {
			continue
		} else if k == "icon" {
			continue
		} else if k == "id" {
			continue
		} else if k == "image" {
			continue
		} else if k == "inReplyTo" {
			continue
		} else if k == "instrument" {
			continue
		} else if k == "likes" {
			continue
		} else if k == "location" {
			continue
		} else if k == "mediaType" {
			continue
		} else if k == "name" {
			continue
		} else if k == "nameMap" {
			continue
		} else if k == "object" {
			continue
		} else if k == "origin" {
			continue
		} else if k == "preview" {
			continue
		} else if k == "published" {
			continue
		} else if k == "replies" {
			continue
		} else if k == "result" {
			continue
		} else if k == "sensitive" {
			continue
		} else if k == "shares" {
			continue
		} else if k == "source" {
			continue
		} else if k == "startTime" {
			continue
		} else if k == "summary" {
			continue
		} else if k == "summaryMap" {
			continue
		} else if k == "tag" {
			continue
		} else if k == "target" {
			continue
		} else if k == "to" {
			continue
		} else if k == "type" {
			continue
		} else if k == "updated" {
			continue
		} else if k == "url" {
			continue
		} // End: Code that ensures a property name is unknown

		this.unknown[k] = v
	}
	// End: Unknown deserialization

	return this, nil
}

// IsOrExtendsLeave returns true if the other provided type is the Leave type or
// extends from the Leave type.
func IsOrExtendsLeave(other vocab.Type) bool {
	if other.GetTypeName() == "Leave" {
		return true
	}
	return LeaveIsExtendedBy(other)
}

// LeaveIsDisjointWith returns true if the other provided type is disjoint with
// the Leave type.
func LeaveIsDisjointWith(other vocab.Type) bool {
	disjointWith := []string{"Hashtag", "Link", "Mention"}
	for _, disjoint := range disjointWith {
		if disjoint == other.GetTypeName() {
			return true
		}
	}
	return false
}

// LeaveIsExtendedBy returns true if the other provided type extends from the
// Leave type. Note that it returns false if the types are the same; see the
// "IsOrExtendsLeave" variant instead.
func LeaveIsExtendedBy(other vocab.Type) bool {
	// Shortcut implementation: is not extended by anything.
	return false
}

// NewActivityStreamsLeave creates a new Leave type
func NewActivityStreamsLeave() *ActivityStreamsLeave {
	typeProp := typePropertyConstructor()
	typeProp.AppendXMLSchemaString("Leave")
	return &ActivityStreamsLeave{
		JSONLDType: typeProp,
		alias:      "",
		unknown:    make(map[string]interface{}),
	}
}

// GetActivityStreamsActor returns the "actor" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsActor() vocab.ActivityStreamsActorProperty {
	return this.ActivityStreamsActor
}

// GetActivityStreamsAltitude returns the "altitude" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsAltitude() vocab.ActivityStreamsAltitudeProperty {
	return this.ActivityStreamsAltitude
}

// GetActivityStreamsAttachment returns the "attachment" property if it exists,
// and nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsAttachment() vocab.ActivityStreamsAttachmentProperty {
	return this.ActivityStreamsAttachment
}

// GetActivityStreamsAttributedTo returns the "attributedTo" property if it
// exists, and nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsAttributedTo() vocab.ActivityStreamsAttributedToProperty {
	return this.ActivityStreamsAttributedTo
}

// GetActivityStreamsAudience returns the "audience" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsAudience() vocab.ActivityStreamsAudienceProperty {
	return this.ActivityStreamsAudience
}

// GetActivityStreamsBcc returns the "bcc" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsBcc() vocab.ActivityStreamsBccProperty {
	return this.ActivityStreamsBcc
}

// GetActivityStreamsBto returns the "bto" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsBto() vocab.ActivityStreamsBtoProperty {
	return this.ActivityStreamsBto
}

// GetActivityStreamsCc returns the "cc" property if it exists, and nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsCc() vocab.ActivityStreamsCcProperty {
	return this.ActivityStreamsCc
}

// GetActivityStreamsContent returns the "content" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsContent() vocab.ActivityStreamsContentProperty {
	return this.ActivityStreamsContent
}

// GetActivityStreamsContext returns the "context" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsContext() vocab.ActivityStreamsContextProperty {
	return this.ActivityStreamsContext
}

// GetActivityStreamsDuration returns the "duration" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsDuration() vocab.ActivityStreamsDurationProperty {
	return this.ActivityStreamsDuration
}

// GetActivityStreamsEndTime returns the "endTime" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsEndTime() vocab.ActivityStreamsEndTimeProperty {
	return this.ActivityStreamsEndTime
}

// GetActivityStreamsGenerator returns the "generator" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsGenerator() vocab.ActivityStreamsGeneratorProperty {
	return this.ActivityStreamsGenerator
}

// GetActivityStreamsIcon returns the "icon" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsIcon() vocab.ActivityStreamsIconProperty {
	return this.ActivityStreamsIcon
}

// GetActivityStreamsImage returns the "image" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsImage() vocab.ActivityStreamsImageProperty {
	return this.ActivityStreamsImage
}

// GetActivityStreamsInReplyTo returns the "inReplyTo" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsInReplyTo() vocab.ActivityStreamsInReplyToProperty {
	return this.ActivityStreamsInReplyTo
}

// GetActivityStreamsInstrument returns the "instrument" property if it exists,
// and nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsInstrument() vocab.ActivityStreamsInstrumentProperty {
	return this.ActivityStreamsInstrument
}

// GetActivityStreamsLikes returns the "likes" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsLikes() vocab.ActivityStreamsLikesProperty {
	return this.ActivityStreamsLikes
}

// GetActivityStreamsLocation returns the "location" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsLocation() vocab.ActivityStreamsLocationProperty {
	return this.ActivityStreamsLocation
}

// GetActivityStreamsMediaType returns the "mediaType" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsMediaType() vocab.ActivityStreamsMediaTypeProperty {
	return this.ActivityStreamsMediaType
}

// GetActivityStreamsName returns the "name" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsName() vocab.ActivityStreamsNameProperty {
	return this.ActivityStreamsName
}

// GetActivityStreamsObject returns the "object" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsObject() vocab.ActivityStreamsObjectProperty {
	return this.ActivityStreamsObject
}

// GetActivityStreamsOrigin returns the "origin" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsOrigin() vocab.ActivityStreamsOriginProperty {
	return this.ActivityStreamsOrigin
}

// GetActivityStreamsPreview returns the "preview" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsPreview() vocab.ActivityStreamsPreviewProperty {
	return this.ActivityStreamsPreview
}

// GetActivityStreamsPublished returns the "published" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsPublished() vocab.ActivityStreamsPublishedProperty {
	return this.ActivityStreamsPublished
}

// GetActivityStreamsReplies returns the "replies" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsReplies() vocab.ActivityStreamsRepliesProperty {
	return this.ActivityStreamsReplies
}

// GetActivityStreamsResult returns the "result" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsResult() vocab.ActivityStreamsResultProperty {
	return this.ActivityStreamsResult
}

// GetActivityStreamsSensitive returns the "sensitive" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsSensitive() vocab.ActivityStreamsSensitiveProperty {
	return this.ActivityStreamsSensitive
}

// GetActivityStreamsShares returns the "shares" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsShares() vocab.ActivityStreamsSharesProperty {
	return this.ActivityStreamsShares
}

// GetActivityStreamsSource returns the "source" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsSource() vocab.ActivityStreamsSourceProperty {
	return this.ActivityStreamsSource
}

// GetActivityStreamsStartTime returns the "startTime" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsStartTime() vocab.ActivityStreamsStartTimeProperty {
	return this.ActivityStreamsStartTime
}

// GetActivityStreamsSummary returns the "summary" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsSummary() vocab.ActivityStreamsSummaryProperty {
	return this.ActivityStreamsSummary
}

// GetActivityStreamsTag returns the "tag" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsTag() vocab.ActivityStreamsTagProperty {
	return this.ActivityStreamsTag
}

// GetActivityStreamsTarget returns the "target" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsTarget() vocab.ActivityStreamsTargetProperty {
	return this.ActivityStreamsTarget
}

// GetActivityStreamsTo returns the "to" property if it exists, and nil otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsTo() vocab.ActivityStreamsToProperty {
	return this.ActivityStreamsTo
}

// GetActivityStreamsUpdated returns the "updated" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsUpdated() vocab.ActivityStreamsUpdatedProperty {
	return this.ActivityStreamsUpdated
}

// GetActivityStreamsUrl returns the "url" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLeave) GetActivityStreamsUrl() vocab.ActivityStreamsUrlProperty {
	return this.ActivityStreamsUrl
}

// GetJSONLDId returns the "id" property if it exists, and nil otherwise.
func (this ActivityStreamsLeave) GetJSONLDId() vocab.JSONLDIdProperty {
	return this.JSONLDId
}

// GetJSONLDType returns the "type" property if it exists, and nil otherwise.
func (this ActivityStreamsLeave) GetJSONLDType() vocab.JSONLDTypeProperty {
	return this.JSONLDType
}

// GetTypeName returns the name of this type.
func (this ActivityStreamsLeave) GetTypeName() string {
	return "Leave"
}

// GetUnknownProperties returns the unknown properties for the Leave type. Note
// that this should not be used by app developers. It is only used to help
// determine which implementation is LessThan the other. Developers who are
// creating a different implementation of this type's interface can use this
// method in their LessThan implementation, but routine ActivityPub
// applications should not use this to bypass the code generation tool.
func (this ActivityStreamsLeave) GetUnknownProperties() map[string]interface{} {
	return this.unknown
}

// IsExtending returns true if the Leave type extends from the other type.
func (this ActivityStreamsLeave) IsExtending(other vocab.Type) bool {
	return ActivityStreamsLeaveExtends(other)
}

// JSONLDContext returns the JSONLD URIs required in the context string for this
// type and the specific properties that are set. The value in the map is the
// alias used to import the type and its properties.
func (this ActivityStreamsLeave) JSONLDContext() map[string]string {
	m := map[string]string{"https://www.w3.org/ns/activitystreams": this.alias}
	m = this.helperJSONLDContext(this.ActivityStreamsActor, m)
	m = this.helperJSONLDContext(this.ActivityStreamsAltitude, m)
	m = this.helperJSONLDContext(this.ActivityStreamsAttachment, m)
	m = this.helperJSONLDContext(this.ActivityStreamsAttributedTo, m)
	m = this.helperJSONLDContext(this.ActivityStreamsAudience, m)
	m = this.helperJSONLDContext(this.ActivityStreamsBcc, m)
	m = this.helperJSONLDContext(this.ActivityStreamsBto, m)
	m = this.helperJSONLDContext(this.ActivityStreamsCc, m)
	m = this.helperJSONLDContext(this.ActivityStreamsContent, m)
	m = this.helperJSONLDContext(this.ActivityStreamsContext, m)
	m = this.helperJSONLDContext(this.ActivityStreamsDuration, m)
	m = this.helperJSONLDContext(this.ActivityStreamsEndTime, m)
	m = this.helperJSONLDContext(this.ActivityStreamsGenerator, m)
	m = this.helperJSONLDContext(this.ActivityStreamsIcon, m)
	m = this.helperJSONLDContext(this.JSONLDId, m)
	m = this.helperJSONLDContext(this.ActivityStreamsImage, m)
	m = this.helperJSONLDContext(this.ActivityStreamsInReplyTo, m)
	m = this.helperJSONLDContext(this.ActivityStreamsInstrument, m)
	m = this.helperJSONLDContext(this.ActivityStreamsLikes, m)
	m = this.helperJSONLDContext(this.ActivityStreamsLocation, m)
	m = this.helperJSONLDContext(this.ActivityStreamsMediaType, m)
	m = this.helperJSONLDContext(this.ActivityStreamsName, m)
	m = this.helperJSONLDContext(this.ActivityStreamsObject, m)
	m = this.helperJSONLDContext(this.ActivityStreamsOrigin, m)
	m = this.helperJSONLDContext(this.ActivityStreamsPreview, m)
	m = this.helperJSONLDContext(this.ActivityStreamsPublished, m)
	m = this.helperJSONLDContext(this.ActivityStreamsReplies, m)
	m = this.helperJSONLDContext(this.ActivityStreamsResult, m)
	m = this.helperJSONLDContext(this.ActivityStreamsSensitive, m)
	m = this.helperJSONLDContext(this.ActivityStreamsShares, m)
	m = this.helperJSONLDContext(this.ActivityStreamsSource, m)
	m = this.helperJSONLDContext(this.ActivityStreamsStartTime, m)
	m = this.helperJSONLDContext(this.ActivityStreamsSummary, m)
	m = this.helperJSONLDContext(this.ActivityStreamsTag, m)
	m = this.helperJSONLDContext(this.ActivityStreamsTarget, m)
	m = this.helperJSONLDContext(this.ActivityStreamsTo, m)
	m = this.helperJSONLDContext(this.JSONLDType, m)
	m = this.helperJSONLDContext(this.ActivityStreamsUpdated, m)
	m = this.helperJSONLDContext(this.ActivityStreamsUrl, m)

	return m
}

// LessThan computes if this Leave is lesser, with an arbitrary but stable
// determination.
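// Properties are compared in the fixed order laid out below; for each
// property, a nil value sorts before a non-nil one, so two values are only
// compared further when both are set.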
func (this ActivityStreamsLeave) LessThan(o vocab.ActivityStreamsLeave) bool {
	// Begin: Compare known properties
	// Compare property "actor"
	if lhs, rhs := this.ActivityStreamsActor, o.GetActivityStreamsActor(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "altitude"
	if lhs, rhs := this.ActivityStreamsAltitude, o.GetActivityStreamsAltitude(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "attachment"
	if lhs, rhs := this.ActivityStreamsAttachment, o.GetActivityStreamsAttachment(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "attributedTo"
	if lhs, rhs := this.ActivityStreamsAttributedTo, o.GetActivityStreamsAttributedTo(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "audience"
	if lhs, rhs := this.ActivityStreamsAudience, o.GetActivityStreamsAudience(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "bcc"
	if lhs, rhs := this.ActivityStreamsBcc, o.GetActivityStreamsBcc(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "bto"
	if lhs, rhs := this.ActivityStreamsBto, o.GetActivityStreamsBto(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "cc"
	if lhs, rhs := this.ActivityStreamsCc, o.GetActivityStreamsCc(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "content"
	if lhs, rhs := this.ActivityStreamsContent, o.GetActivityStreamsContent(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "context"
	if lhs, rhs := this.ActivityStreamsContext, o.GetActivityStreamsContext(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "duration"
	if lhs, rhs := this.ActivityStreamsDuration, o.GetActivityStreamsDuration(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "endTime"
	if lhs, rhs := this.ActivityStreamsEndTime, o.GetActivityStreamsEndTime(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "generator"
	if lhs, rhs := this.ActivityStreamsGenerator, o.GetActivityStreamsGenerator(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "icon"
	if lhs, rhs := this.ActivityStreamsIcon, o.GetActivityStreamsIcon(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "id"
	if lhs, rhs := this.JSONLDId, o.GetJSONLDId(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "image"
	if lhs, rhs := this.ActivityStreamsImage, o.GetActivityStreamsImage(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "inReplyTo"
	if lhs, rhs := this.ActivityStreamsInReplyTo, o.GetActivityStreamsInReplyTo(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "instrument"
	if lhs, rhs := this.ActivityStreamsInstrument, o.GetActivityStreamsInstrument(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "likes"
	if lhs, rhs := this.ActivityStreamsLikes, o.GetActivityStreamsLikes(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "location"
	if lhs, rhs := this.ActivityStreamsLocation, o.GetActivityStreamsLocation(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "mediaType"
	if lhs, rhs := this.ActivityStreamsMediaType, o.GetActivityStreamsMediaType(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "name"
	if lhs, rhs := this.ActivityStreamsName, o.GetActivityStreamsName(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "object"
	if lhs, rhs := this.ActivityStreamsObject, o.GetActivityStreamsObject(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "origin"
	if lhs, rhs := this.ActivityStreamsOrigin, o.GetActivityStreamsOrigin(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "preview"
	if lhs, rhs := this.ActivityStreamsPreview, o.GetActivityStreamsPreview(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "published"
	if lhs, rhs := this.ActivityStreamsPublished, o.GetActivityStreamsPublished(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "replies"
	if lhs, rhs := this.ActivityStreamsReplies, o.GetActivityStreamsReplies(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "result"
	if lhs, rhs := this.ActivityStreamsResult, o.GetActivityStreamsResult(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "sensitive"
	if lhs, rhs := this.ActivityStreamsSensitive, o.GetActivityStreamsSensitive(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "shares"
	if lhs, rhs := this.ActivityStreamsShares, o.GetActivityStreamsShares(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "source"
	if lhs, rhs := this.ActivityStreamsSource, o.GetActivityStreamsSource(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "startTime"
	if lhs, rhs := this.ActivityStreamsStartTime, o.GetActivityStreamsStartTime(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "summary"
	if lhs, rhs := this.ActivityStreamsSummary, o.GetActivityStreamsSummary(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "tag"
	if lhs, rhs := this.ActivityStreamsTag, o.GetActivityStreamsTag(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "target"
	if lhs, rhs := this.ActivityStreamsTarget, o.GetActivityStreamsTarget(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "to"
	if lhs, rhs := this.ActivityStreamsTo, o.GetActivityStreamsTo(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "type"
	if lhs, rhs := this.JSONLDType, o.GetJSONLDType(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "updated"
	if lhs, rhs := this.ActivityStreamsUpdated, o.GetActivityStreamsUpdated(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if rhs == nil && lhs != nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "url"
	if
lhs, rhs := this.ActivityStreamsUrl, o.GetActivityStreamsUrl(); lhs != nil && rhs != nil { if lhs.LessThan(rhs) { return true } else if rhs.LessThan(lhs) { return false } } else if lhs == nil && rhs != nil { // Nil is less than anything else return true } else if rhs != nil && rhs == nil { // Anything else is greater than nil return false } // Else: Both are nil // End: Compare known properties // Begin: Compare unknown properties (only by number of them) if len(this.unknown) < len(o.GetUnknownProperties()) { return true } else if len(o.GetUnknownProperties()) < len(this.unknown) { return false } // End: Compare unknown properties (only by number of them) // All properties are the same. return false } // Serialize converts this into an interface representation suitable for // marshalling into a text or binary format. func (this ActivityStreamsLeave) Serialize() (map[string]interface{}, error) { m := make(map[string]interface{}) typeName := "Leave" if len(this.alias) > 0 { typeName = this.alias + ":" + "Leave" } m["type"] = typeName // Begin: Serialize known properties // Maybe serialize property "actor" if this.ActivityStreamsActor != nil { if i, err := this.ActivityStreamsActor.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsActor.Name()] = i } } // Maybe serialize property "altitude" if this.ActivityStreamsAltitude != nil { if i, err := this.ActivityStreamsAltitude.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsAltitude.Name()] = i } } // Maybe serialize property "attachment" if this.ActivityStreamsAttachment != nil { if i, err := this.ActivityStreamsAttachment.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsAttachment.Name()] = i } } // Maybe serialize property "attributedTo" if this.ActivityStreamsAttributedTo != nil { if i, err := this.ActivityStreamsAttributedTo.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsAttributedTo.Name()] = i } } // Maybe serialize property "audience" if this.ActivityStreamsAudience != nil { if i, err := this.ActivityStreamsAudience.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsAudience.Name()] = i } } // Maybe serialize property "bcc" if this.ActivityStreamsBcc != nil { if i, err := this.ActivityStreamsBcc.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsBcc.Name()] = i } } // Maybe serialize property "bto" if this.ActivityStreamsBto != nil { if i, err := this.ActivityStreamsBto.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsBto.Name()] = i } } // Maybe serialize property "cc" if this.ActivityStreamsCc != nil { if i, err := this.ActivityStreamsCc.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsCc.Name()] = i } } // Maybe serialize property "content" if this.ActivityStreamsContent != nil { if i, err := this.ActivityStreamsContent.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsContent.Name()] = i } } // Maybe serialize property "context" if this.ActivityStreamsContext != nil { if i, err := this.ActivityStreamsContext.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsContext.Name()] = i } } // Maybe serialize property "duration" if this.ActivityStreamsDuration != nil { if i, err := this.ActivityStreamsDuration.Serialize(); err != nil { return nil, err } else if i != nil { 
m[this.ActivityStreamsDuration.Name()] = i } } // Maybe serialize property "endTime" if this.ActivityStreamsEndTime != nil { if i, err := this.ActivityStreamsEndTime.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsEndTime.Name()] = i } } // Maybe serialize property "generator" if this.ActivityStreamsGenerator != nil { if i, err := this.ActivityStreamsGenerator.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsGenerator.Name()] = i } } // Maybe serialize property "icon" if this.ActivityStreamsIcon != nil { if i, err := this.ActivityStreamsIcon.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsIcon.Name()] = i } } // Maybe serialize property "id" if this.JSONLDId != nil { if i, err := this.JSONLDId.Serialize(); err != nil { return nil, err } else if i != nil { m[this.JSONLDId.Name()] = i } } // Maybe serialize property "image" if this.ActivityStreamsImage != nil { if i, err := this.ActivityStreamsImage.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsImage.Name()] = i } } // Maybe serialize property "inReplyTo" if this.ActivityStreamsInReplyTo != nil { if i, err := this.ActivityStreamsInReplyTo.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsInReplyTo.Name()] = i } } // Maybe serialize property "instrument" if this.ActivityStreamsInstrument != nil { if i, err := this.ActivityStreamsInstrument.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsInstrument.Name()] = i } } // Maybe serialize property "likes" if this.ActivityStreamsLikes != nil { if i, err := this.ActivityStreamsLikes.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsLikes.Name()] = i } } // Maybe serialize property "location" if this.ActivityStreamsLocation != nil { if i, err := this.ActivityStreamsLocation.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsLocation.Name()] = i } } // Maybe serialize property "mediaType" if this.ActivityStreamsMediaType != nil { if i, err := this.ActivityStreamsMediaType.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsMediaType.Name()] = i } } // Maybe serialize property "name" if this.ActivityStreamsName != nil { if i, err := this.ActivityStreamsName.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsName.Name()] = i } } // Maybe serialize property "object" if this.ActivityStreamsObject != nil { if i, err := this.ActivityStreamsObject.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsObject.Name()] = i } } // Maybe serialize property "origin" if this.ActivityStreamsOrigin != nil { if i, err := this.ActivityStreamsOrigin.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsOrigin.Name()] = i } } // Maybe serialize property "preview" if this.ActivityStreamsPreview != nil { if i, err := this.ActivityStreamsPreview.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsPreview.Name()] = i } } // Maybe serialize property "published" if this.ActivityStreamsPublished != nil { if i, err := this.ActivityStreamsPublished.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsPublished.Name()] = i } } // Maybe serialize property "replies" if this.ActivityStreamsReplies != nil { if i, err := this.ActivityStreamsReplies.Serialize(); 
err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsReplies.Name()] = i } } // Maybe serialize property "result" if this.ActivityStreamsResult != nil { if i, err := this.ActivityStreamsResult.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsResult.Name()] = i } } // Maybe serialize property "sensitive" if this.ActivityStreamsSensitive != nil { if i, err := this.ActivityStreamsSensitive.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsSensitive.Name()] = i } } // Maybe serialize property "shares" if this.ActivityStreamsShares != nil { if i, err := this.ActivityStreamsShares.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsShares.Name()] = i } } // Maybe serialize property "source" if this.ActivityStreamsSource != nil { if i, err := this.ActivityStreamsSource.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsSource.Name()] = i } } // Maybe serialize property "startTime" if this.ActivityStreamsStartTime != nil { if i, err := this.ActivityStreamsStartTime.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsStartTime.Name()] = i } } // Maybe serialize property "summary" if this.ActivityStreamsSummary != nil { if i, err := this.ActivityStreamsSummary.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsSummary.Name()] = i } } // Maybe serialize property "tag" if this.ActivityStreamsTag != nil { if i, err := this.ActivityStreamsTag.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsTag.Name()] = i } } // Maybe serialize property "target" if this.ActivityStreamsTarget != nil { if i, err := this.ActivityStreamsTarget.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsTarget.Name()] = i } } // Maybe serialize property "to" if this.ActivityStreamsTo != nil { if i, err := this.ActivityStreamsTo.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsTo.Name()] = i } } // Maybe serialize property "type" if this.JSONLDType != nil { if i, err := this.JSONLDType.Serialize(); err != nil { return nil, err } else if i != nil { m[this.JSONLDType.Name()] = i } } // Maybe serialize property "updated" if this.ActivityStreamsUpdated != nil { if i, err := this.ActivityStreamsUpdated.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsUpdated.Name()] = i } } // Maybe serialize property "url" if this.ActivityStreamsUrl != nil { if i, err := this.ActivityStreamsUrl.Serialize(); err != nil { return nil, err } else if i != nil { m[this.ActivityStreamsUrl.Name()] = i } } // End: Serialize known properties // Begin: Serialize unknown properties for k, v := range this.unknown { // To be safe, ensure we aren't overwriting a known property if _, has := m[k]; !has { m[k] = v } } // End: Serialize unknown properties return m, nil } // SetActivityStreamsActor sets the "actor" property. func (this *ActivityStreamsLeave) SetActivityStreamsActor(i vocab.ActivityStreamsActorProperty) { this.ActivityStreamsActor = i } // SetActivityStreamsAltitude sets the "altitude" property. func (this *ActivityStreamsLeave) SetActivityStreamsAltitude(i vocab.ActivityStreamsAltitudeProperty) { this.ActivityStreamsAltitude = i } // SetActivityStreamsAttachment sets the "attachment" property. 
func (this *ActivityStreamsLeave) SetActivityStreamsAttachment(i vocab.ActivityStreamsAttachmentProperty) { this.ActivityStreamsAttachment = i } // SetActivityStreamsAttributedTo sets the "attributedTo" property. func (this *ActivityStreamsLeave) SetActivityStreamsAttributedTo(i vocab.ActivityStreamsAttributedToProperty) { this.ActivityStreamsAttributedTo = i } // SetActivityStreamsAudience sets the "audience" property. func (this *ActivityStreamsLeave) SetActivityStreamsAudience(i vocab.ActivityStreamsAudienceProperty) { this.ActivityStreamsAudience = i } // SetActivityStreamsBcc sets the "bcc" property. func (this *ActivityStreamsLeave) SetActivityStreamsBcc(i vocab.ActivityStreamsBccProperty) { this.ActivityStreamsBcc = i } // SetActivityStreamsBto sets the "bto" property. func (this *ActivityStreamsLeave) SetActivityStreamsBto(i vocab.ActivityStreamsBtoProperty) { this.ActivityStreamsBto = i } // SetActivityStreamsCc sets the "cc" property. func (this *ActivityStreamsLeave) SetActivityStreamsCc(i vocab.ActivityStreamsCcProperty) { this.ActivityStreamsCc = i } // SetActivityStreamsContent sets the "content" property. func (this *ActivityStreamsLeave) SetActivityStreamsContent(i vocab.ActivityStreamsContentProperty) { this.ActivityStreamsContent = i } // SetActivityStreamsContext sets the "context" property. func (this *ActivityStreamsLeave) SetActivityStreamsContext(i vocab.ActivityStreamsContextProperty) { this.ActivityStreamsContext = i } // SetActivityStreamsDuration sets the "duration" property. func (this *ActivityStreamsLeave) SetActivityStreamsDuration(i vocab.ActivityStreamsDurationProperty) { this.ActivityStreamsDuration = i } // SetActivityStreamsEndTime sets the "endTime" property. func (this *ActivityStreamsLeave) SetActivityStreamsEndTime(i vocab.ActivityStreamsEndTimeProperty) { this.ActivityStreamsEndTime = i } // SetActivityStreamsGenerator sets the "generator" property. func (this *ActivityStreamsLeave) SetActivityStreamsGenerator(i vocab.ActivityStreamsGeneratorProperty) { this.ActivityStreamsGenerator = i } // SetActivityStreamsIcon sets the "icon" property. func (this *ActivityStreamsLeave) SetActivityStreamsIcon(i vocab.ActivityStreamsIconProperty) { this.ActivityStreamsIcon = i } // SetActivityStreamsImage sets the "image" property. func (this *ActivityStreamsLeave) SetActivityStreamsImage(i vocab.ActivityStreamsImageProperty) { this.ActivityStreamsImage = i } // SetActivityStreamsInReplyTo sets the "inReplyTo" property. func (this *ActivityStreamsLeave) SetActivityStreamsInReplyTo(i vocab.ActivityStreamsInReplyToProperty) { this.ActivityStreamsInReplyTo = i } // SetActivityStreamsInstrument sets the "instrument" property. func (this *ActivityStreamsLeave) SetActivityStreamsInstrument(i vocab.ActivityStreamsInstrumentProperty) { this.ActivityStreamsInstrument = i } // SetActivityStreamsLikes sets the "likes" property. func (this *ActivityStreamsLeave) SetActivityStreamsLikes(i vocab.ActivityStreamsLikesProperty) { this.ActivityStreamsLikes = i } // SetActivityStreamsLocation sets the "location" property. func (this *ActivityStreamsLeave) SetActivityStreamsLocation(i vocab.ActivityStreamsLocationProperty) { this.ActivityStreamsLocation = i } // SetActivityStreamsMediaType sets the "mediaType" property. func (this *ActivityStreamsLeave) SetActivityStreamsMediaType(i vocab.ActivityStreamsMediaTypeProperty) { this.ActivityStreamsMediaType = i } // SetActivityStreamsName sets the "name" property. 
func (this *ActivityStreamsLeave) SetActivityStreamsName(i vocab.ActivityStreamsNameProperty) { this.ActivityStreamsName = i } // SetActivityStreamsObject sets the "object" property. func (this *ActivityStreamsLeave) SetActivityStreamsObject(i vocab.ActivityStreamsObjectProperty) { this.ActivityStreamsObject = i } // SetActivityStreamsOrigin sets the "origin" property. func (this *ActivityStreamsLeave) SetActivityStreamsOrigin(i vocab.ActivityStreamsOriginProperty) { this.ActivityStreamsOrigin = i } // SetActivityStreamsPreview sets the "preview" property. func (this *ActivityStreamsLeave) SetActivityStreamsPreview(i vocab.ActivityStreamsPreviewProperty) { this.ActivityStreamsPreview = i } // SetActivityStreamsPublished sets the "published" property. func (this *ActivityStreamsLeave) SetActivityStreamsPublished(i vocab.ActivityStreamsPublishedProperty) { this.ActivityStreamsPublished = i } // SetActivityStreamsReplies sets the "replies" property. func (this *ActivityStreamsLeave) SetActivityStreamsReplies(i vocab.ActivityStreamsRepliesProperty) { this.ActivityStreamsReplies = i } // SetActivityStreamsResult sets the "result" property. func (this *ActivityStreamsLeave) SetActivityStreamsResult(i vocab.ActivityStreamsResultProperty) { this.ActivityStreamsResult = i } // SetActivityStreamsSensitive sets the "sensitive" property. func (this *ActivityStreamsLeave) SetActivityStreamsSensitive(i vocab.ActivityStreamsSensitiveProperty) { this.ActivityStreamsSensitive = i } // SetActivityStreamsShares sets the "shares" property. func (this *ActivityStreamsLeave) SetActivityStreamsShares(i vocab.ActivityStreamsSharesProperty) { this.ActivityStreamsShares = i } // SetActivityStreamsSource sets the "source" property. func (this *ActivityStreamsLeave) SetActivityStreamsSource(i vocab.ActivityStreamsSourceProperty) { this.ActivityStreamsSource = i } // SetActivityStreamsStartTime sets the "startTime" property. func (this *ActivityStreamsLeave) SetActivityStreamsStartTime(i vocab.ActivityStreamsStartTimeProperty) { this.ActivityStreamsStartTime = i } // SetActivityStreamsSummary sets the "summary" property. func (this *ActivityStreamsLeave) SetActivityStreamsSummary(i vocab.ActivityStreamsSummaryProperty) { this.ActivityStreamsSummary = i } // SetActivityStreamsTag sets the "tag" property. func (this *ActivityStreamsLeave) SetActivityStreamsTag(i vocab.ActivityStreamsTagProperty) { this.ActivityStreamsTag = i } // SetActivityStreamsTarget sets the "target" property. func (this *ActivityStreamsLeave) SetActivityStreamsTarget(i vocab.ActivityStreamsTargetProperty) { this.ActivityStreamsTarget = i } // SetActivityStreamsTo sets the "to" property. func (this *ActivityStreamsLeave) SetActivityStreamsTo(i vocab.ActivityStreamsToProperty) { this.ActivityStreamsTo = i } // SetActivityStreamsUpdated sets the "updated" property. func (this *ActivityStreamsLeave) SetActivityStreamsUpdated(i vocab.ActivityStreamsUpdatedProperty) { this.ActivityStreamsUpdated = i } // SetActivityStreamsUrl sets the "url" property. func (this *ActivityStreamsLeave) SetActivityStreamsUrl(i vocab.ActivityStreamsUrlProperty) { this.ActivityStreamsUrl = i } // SetJSONLDId sets the "id" property. func (this *ActivityStreamsLeave) SetJSONLDId(i vocab.JSONLDIdProperty) { this.JSONLDId = i } // SetJSONLDType sets the "type" property. func (this *ActivityStreamsLeave) SetJSONLDType(i vocab.JSONLDTypeProperty) { this.JSONLDType = i } // VocabularyURI returns the vocabulary's URI as a string. 
func (this ActivityStreamsLeave) VocabularyURI() string { return "https://www.w3.org/ns/activitystreams" } // helperJSONLDContext obtains the context uris and their aliases from a property, // if it is not nil. func (this ActivityStreamsLeave) helperJSONLDContext(i jsonldContexter, toMerge map[string]string) map[string]string { if i == nil { return toMerge } for k, v := range i.JSONLDContext() { /* Since the literal maps in this function are determined at code-generation time, this loop should not overwrite an existing key with a new value. */ toMerge[k] = v } return toMerge }
return true
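The generated LessThan above repeats one nil-aware comparison step for every property, in a fixed property order, short-circuiting on the first property where the two sides differ. The Go sketch below isolates that single step; the interface and function names (lessThaner, propLess, intProp) are illustrative, not go-fed's.

package main

import "fmt"

// lessThaner stands in for any generated property type; illustrative name.
type lessThaner interface {
	LessThan(o lessThaner) bool
}

// intProp is a toy property used only to exercise the comparison step.
type intProp int

func (p intProp) LessThan(o lessThaner) bool { return p < o.(intProp) }

// propLess performs one per-property comparison step: it returns
// (isLess, decided); decided == false means the two sides tie on this
// property and the caller falls through to the next property.
func propLess(lhs, rhs lessThaner) (isLess, decided bool) {
	switch {
	case lhs != nil && rhs != nil:
		if lhs.LessThan(rhs) {
			return true, true
		}
		if rhs.LessThan(lhs) {
			return false, true
		}
		return false, false // equal on this property
	case lhs == nil && rhs != nil:
		return true, true // nil is less than anything else
	case lhs != nil && rhs == nil:
		return false, true // anything else is greater than nil
	default:
		return false, false // both nil: tie
	}
}

func main() {
	fmt.Println(propLess(intProp(1), intProp(2))) // true true
	fmt.Println(propLess(nil, intProp(1)))        // true true
	fmt.Println(propLess(intProp(1), intProp(1))) // false false
}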
errors.rs
//  _   _
// | |_| |__   ___  ___ __ _
// | __| '_ \ / _ \/ __/ _` |
// | |_| | | |  __/ (_| (_| |
//  \__|_| |_|\___|\___\__,_|
//
// licensed under the MIT license <http://opensource.org/licenses/MIT>
use std::fmt;
use std::convert::From;
use std::error::Error as StdError;
use std::io::Error as IoError;
use std::string::FromUtf8Error;
use std::time::SystemTimeError;

use time::ParseError;
use crypto::symmetriccipher::SymmetricCipherError;
use rustc_serialize::json::EncoderError;
use docopt;
use term;

pub type Result<T> = ::std::result::Result<T, Error>;

#[derive(Debug)]
pub enum ErrorKind {
    Term(term::Error),
    InternalIo(IoError),
    Generic,
}

#[derive(Debug)]
pub struct Error {
    pub kind: ErrorKind,
    pub desc: String,
    pub detail: Option<String>,
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", &self.desc)
    }
}

impl StdError for Error {
    fn description(&self) -> &str {
        &self.desc
    }

    fn cause(&self) -> Option<&dyn StdError> {
        match self.kind {
            ErrorKind::Term(ref e) => Some(e),
            ErrorKind::InternalIo(ref e) => Some(e),
            _ => None,
        }
    }
}

macro_rules! specific_fail {
    ($short:expr) => {{
        use errors::ErrorKind;
        Err(::std::convert::From::from(
            Error {
                kind: ErrorKind::Generic,
                desc: $short,
                detail: None,
            }
        ))
    }}
}

macro_rules! specific_fail_str {
    ($s:expr) => {
        specific_fail!($s.to_string())
    }
}

macro_rules! try_errno {
    ($e:expr) => {
        {
            if $e != 0 {
                return Err(
                    ::std::convert::From::from(
                        IoError::last_os_error()
                    )
                );
            }
        }
    }
}

impl From<EncoderError> for Error {
    fn from(err: EncoderError) -> Error {
        Error {
            kind: ErrorKind::Generic,
            desc: err.to_string(),
            detail: None,
        }
    }
}

impl From<IoError> for Error {
    fn from(err: IoError) -> Error {
        Error {
            kind: ErrorKind::Generic,
            desc: err.to_string().into(),
            detail: None,
        }
    }
}

impl From<term::Error> for Error {
    fn from(err: term::Error) -> Error {
        Error {
            desc: err.to_string().into(),
            kind: ErrorKind::Term(err),
            detail: None,
        }
    }
}

impl From<(ErrorKind, &'static str)> for Error {
    fn from((kind, desc): (ErrorKind, &'static str)) -> Error {
        Error {
            kind: kind,
            desc: desc.to_string(),
            detail: None,
        }
    }
}

impl From<SystemTimeError> for Error {
    fn from(err: SystemTimeError) -> Error {
        Error {
            kind: ErrorKind::Generic,
            desc: err.to_string().into(),
            detail: None,
        }
    }
}

impl From<ParseError> for Error {
    fn from(err: ParseError) -> Error {
        Error {
            kind: ErrorKind::Generic,
            desc: format!("time parsing error: {}", err),
            detail: None,
        }
    }
}

impl From<FromUtf8Error> for Error {
    fn from(err: FromUtf8Error) -> Error {
        Error {
            kind: ErrorKind::Generic,
            desc: format!("is this profile encrypted? ({})", err),
            detail: None,
        }
    }
}

impl From<SymmetricCipherError> for Error {
    fn from(_: SymmetricCipherError) -> Error {
        Error {
            kind: ErrorKind::Generic,
            desc: "invalid encryption key".to_string(),
            detail: None,
        }
    }
}

impl From<docopt::Error> for Error {
    fn from(err: docopt::Error) -> Error {
        Error {
            kind: ErrorKind::Generic,
            desc: err.to_string(),
            detail: None,
        }
    }
}

impl From<fmt::Error> for Error {
    fn from(_: fmt::Error) -> Error {
        Error {
            kind: ErrorKind::Generic,
            desc: "formatting error".to_string(),
            detail: None,
        }
    }
}
//
// errors.rs
// definitions for Error, a catch-all for converting various
// lib errors.
config.go
// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. // See License.txt for license information. package commands import ( "bytes" "encoding/json" "fmt" "os" "reflect" "strconv" "strings" "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/mattermost/mattermost-server/mlog" "github.com/mattermost/mattermost-server/model" "github.com/mattermost/mattermost-server/utils" ) var ConfigCmd = &cobra.Command{ Use: "config", Short: "Configuration", } var ValidateConfigCmd = &cobra.Command{ Use: "validate", Short: "Validate config file", Long: "If the config file is valid, this command will output a success message and have a zero exit code. If it is invalid, this command will output an error and have a non-zero exit code.", RunE: configValidateCmdF, } var ConfigSubpathCmd = &cobra.Command{ Use: "subpath", Short: "Update client asset loading to use the configured subpath", Long: "Update the hard-coded production client asset paths to take into account Mattermost running on a subpath.", Example: ` config subpath
var ConfigGetCmd = &cobra.Command{ Use: "get", Short: "Get config setting", Long: "Gets the value of a config setting by its name in dot notation.", Example: `config get SqlSettings.DriverName`, Args: cobra.ExactArgs(1), RunE: configGetCmdF, } var ConfigShowCmd = &cobra.Command{ Use: "show", Short: "Writes the server configuration to STDOUT", Long: "Pretty-prints the server configuration and writes to STDOUT", Example: "config show", RunE: configShowCmdF, } var ConfigSetCmd = &cobra.Command{ Use: "set", Short: "Set config setting", Long: "Sets the value of a config setting by its name in dot notation. Accepts multiple values for array settings", Example: "config set SqlSettings.DriverName mysql", Args: cobra.MinimumNArgs(2), RunE: configSetCmdF, } func init() { ConfigSubpathCmd.Flags().String("path", "", "Optional subpath; defaults to value in SiteURL") ConfigCmd.AddCommand( ValidateConfigCmd, ConfigSubpathCmd, ConfigGetCmd, ConfigShowCmd, ConfigSetCmd, ) RootCmd.AddCommand(ConfigCmd) } func configValidateCmdF(command *cobra.Command, args []string) error { utils.TranslationsPreInit() model.AppErrorInit(utils.T) filePath, err := command.Flags().GetString("config") if err != nil { return err } filePath = utils.FindConfigFile(filePath) file, err := os.Open(filePath) if err != nil { return err } decoder := json.NewDecoder(file) config := model.Config{} err = decoder.Decode(&config) if err != nil { return err } if _, err := file.Stat(); err != nil { return err } if err := config.IsValid(); err != nil { return errors.New(utils.T(err.Id)) } CommandPrettyPrintln("The document is valid") return nil } func configSubpathCmdF(command *cobra.Command, args []string) error { a, err := InitDBCommandContextCobra(command) if err != nil { return err } defer a.Shutdown() path, err := command.Flags().GetString("path") if err != nil { return errors.Wrap(err, "failed reading path") } if path == "" { return utils.UpdateAssetsSubpathFromConfig(a.Config()) } if err := utils.UpdateAssetsSubpath(path); err != nil { return errors.Wrap(err, "failed to update assets subpath") } return nil } func configGetCmdF(command *cobra.Command, args []string) error { app, err := InitDBCommandContextCobra(command) if err != nil { return err } defer app.Shutdown() // create the model for config // Note: app.Config() returns a pointer, make appropriate changes config := app.Config() // get the print config setting and any error if there is out, err := printConfigValues(configToMap(*config), strings.Split(args[0], "."), args[0]) if err != nil { return err } fmt.Printf("%s", out) return nil } func configShowCmdF(command *cobra.Command, args []string) error { app, err := InitDBCommandContextCobra(command) if err != nil { return err } defer app.Shutdown() // check that no arguments are given err = cobra.NoArgs(command, args) if err != nil { return err } // set up the config object config := app.Config() // pretty print fmt.Printf("%s", prettyPrint(configToMap(*config))) return nil } // printConfigValues function prints out the value of the configSettings working recursively or // gives an error if config setting is not in the file. 
func printConfigValues(configMap map[string]interface{}, configSetting []string, name string) (string, error) { res, ok := configMap[configSetting[0]] if !ok { return "", fmt.Errorf("%s configuration setting is not in the file", name) } value := reflect.ValueOf(res) switch value.Kind() { case reflect.Map: if len(configSetting) == 1 { return printMap(value, 0), nil } return printConfigValues(res.(map[string]interface{}), configSetting[1:], name) default: if len(configSetting) == 1 { return fmt.Sprintf("%s: \"%v\"\n", name, res), nil } return "", fmt.Errorf("%s configuration setting is not in the file", name) } } // prettyPrint the map func prettyPrint(configMap map[string]interface{}) string { value := reflect.ValueOf(configMap) return printMap(value, 0) } // printMap takes a reflect.Value and print it out, recursively if its a map with the given tab settings. func printMap(value reflect.Value, tabVal int) string { out := &bytes.Buffer{} for _, key := range value.MapKeys() { val := value.MapIndex(key) if newVal, ok := val.Interface().(map[string]interface{}); !ok { fmt.Fprintf(out, "%s", strings.Repeat("\t", tabVal)) fmt.Fprintf(out, "%v: \"%v\"\n", key.Interface(), val.Interface()) } else { fmt.Fprintf(out, "%s", strings.Repeat("\t", tabVal)) fmt.Fprintf(out, "%v:\n", key.Interface()) // going one level in, increase the tab tabVal++ fmt.Fprintf(out, "%s", printMap(reflect.ValueOf(newVal), tabVal)) // coming back one level, decrease the tab tabVal-- } } return out.String() } func configSetCmdF(command *cobra.Command, args []string) error { app, err := InitDBCommandContextCobra(command) if err != nil { return err } defer app.Shutdown() // args[0] -> holds the config setting that we want to change // args[1:] -> the new value of the config setting configSetting := args[0] newVal := args[1:] // Update the config // first disable the watchers app.DisableConfigWatch() // create the function to update config oldConfig := app.Config() newConfig := app.Config() f := updateConfigValue(configSetting, newVal, oldConfig, newConfig) // update the config app.UpdateConfig(f) // Verify new config if err := newConfig.IsValid(); err != nil { return err } if err := utils.ValidateLocales(app.Config()); err != nil { return errors.New("Invalid locale configuration") } // make the changes persist app.PersistConfig() // reload config app.ReloadConfig() // Enable config watchers app.EnableConfigWatch() return nil } func updateConfigValue(configSetting string, newVal []string, oldConfig, newConfig *model.Config) func(*model.Config) { return func(update *model.Config) { // convert config to map[string]interface configMap := configToMap(*oldConfig) // iterate through the map and update the value or print an error and exit err := UpdateMap(configMap, strings.Split(configSetting, "."), newVal) if err != nil { fmt.Printf("%s\n", err) os.Exit(1) } // convert map to json bs, err := json.Marshal(configMap) if err != nil { fmt.Printf("Error while marshalling map to json %s\n", err) os.Exit(1) } // convert json to struct err = json.Unmarshal(bs, newConfig) if err != nil { fmt.Printf("Error while unmarshalling json to struct %s\n", err) os.Exit(1) } *update = *newConfig } } func UpdateMap(configMap map[string]interface{}, configSettings []string, newVal []string) error { res, ok := configMap[configSettings[0]] if !ok { return fmt.Errorf("unable to find a setting with that name %s", configSettings[0]) } value := reflect.ValueOf(res) switch value.Kind() { case reflect.Map: // we can only change the value of a particular setting, 
not the whole map, return error if len(configSettings) == 1 { return errors.New("unable to set multiple settings at once") } return UpdateMap(res.(map[string]interface{}), configSettings[1:], newVal) case reflect.Int: if len(configSettings) == 1 { val, err := strconv.Atoi(newVal[0]) if err != nil { return err } configMap[configSettings[0]] = val return nil } return fmt.Errorf("unable to find a setting with that name %s", configSettings[0]) case reflect.Int64: if len(configSettings) == 1 { val, err := strconv.Atoi(newVal[0]) if err != nil { return err } configMap[configSettings[0]] = int64(val) return nil } return fmt.Errorf("unable to find a setting with that name %s", configSettings[0]) case reflect.Bool: if len(configSettings) == 1 { val, err := strconv.ParseBool(newVal[0]) if err != nil { return err } configMap[configSettings[0]] = val return nil } return fmt.Errorf("unable to find a setting with that name %s", configSettings[0]) case reflect.String: if len(configSettings) == 1 { configMap[configSettings[0]] = newVal[0] return nil } return fmt.Errorf("unable to find a setting with that name %s", configSettings[0]) case reflect.Slice: if len(configSettings) == 1 { configMap[configSettings[0]] = newVal return nil } return fmt.Errorf("unable to find a setting with that name %s", configSettings[0]) default: return errors.New("type not supported yet") } } // configToMap converts our config into a map func configToMap(s interface{}) map[string]interface{} { return structToMap(s) } // structToMap converts a struct into a map func structToMap(t interface{}) map[string]interface{} { defer func() { if r := recover(); r != nil { mlog.Error(fmt.Sprintf("Panicked in structToMap. This should never happen. %v", r)) } }() val := reflect.ValueOf(t) if val.Kind() != reflect.Struct { return nil } out := map[string]interface{}{} for i := 0; i < val.NumField(); i++ { field := val.Field(i) var value interface{} switch field.Kind() { case reflect.Struct: value = structToMap(field.Interface()) case reflect.Ptr: indirectType := field.Elem() if indirectType.Kind() == reflect.Struct { value = structToMap(indirectType.Interface()) } else { value = indirectType.Interface() } default: value = field.Interface() } out[val.Type().Field(i).Name] = value } return out }
  config subpath --path /mattermost
  config subpath --path /`,
	RunE: configSubpathCmdF,
}
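Both printConfigValues and UpdateMap above resolve a setting named in dot notation by walking the map[string]interface{} produced by configToMap one key at a time, recursing into nested maps. A minimal Go sketch of that lookup follows; the helper name getByPath is illustrative and not part of the Mattermost CLI.

package main

import (
	"fmt"
	"strings"
)

// getByPath walks a nested map using a dot-notation path such as
// "SqlSettings.DriverName", mirroring the recursion in printConfigValues.
func getByPath(m map[string]interface{}, path string) (interface{}, error) {
	keys := strings.Split(path, ".")
	var cur interface{} = m
	for _, k := range keys {
		node, ok := cur.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("%s: %q is not a nested setting", path, k)
		}
		cur, ok = node[k]
		if !ok {
			return nil, fmt.Errorf("%s: setting %q is not in the file", path, k)
		}
	}
	return cur, nil
}

func main() {
	cfg := map[string]interface{}{
		"SqlSettings": map[string]interface{}{"DriverName": "mysql"},
	}
	v, err := getByPath(cfg, "SqlSettings.DriverName")
	fmt.Println(v, err) // mysql <nil>
}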
bpm.go
// Copyright 2021 Chaos Mesh Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package bpm import ( "context" "fmt" "io" "os" "os/exec" "sync" "syscall" "github.com/shirou/gopsutil/process" ctrl "sigs.k8s.io/controller-runtime" ) var log = ctrl.Log.WithName("background-process-manager") type NsType string const ( MountNS NsType = "mnt" // uts namespace is not supported yet // UtsNS NsType = "uts" IpcNS NsType = "ipc" NetNS NsType = "net" PidNS NsType = "pid" // user namespace is not supported yet // UserNS NsType = "user" ) var nsArgMap = map[NsType]string{ MountNS: "m", // uts namespace is not supported by nsexec yet // UtsNS: "u", IpcNS: "i", NetNS: "n", PidNS: "p", // user namespace is not supported by nsexec yet // UserNS: "U", } const ( pausePath = "/usr/local/bin/pause" nsexecPath = "/usr/local/bin/nsexec" DefaultProcPrefix = "/proc" ) // ProcessPair is an identifier for process type ProcessPair struct { Pid int CreateTime int64 } // Stdio contains stdin, stdout and stderr type Stdio struct { sync.Locker Stdin, Stdout, Stderr io.ReadWriteCloser } // BackgroundProcessManager manages all background processes type BackgroundProcessManager struct { deathSig *sync.Map identifiers *sync.Map stdio *sync.Map } // NewBackgroundProcessManager creates a background process manager func NewBackgroundProcessManager() BackgroundProcessManager { return BackgroundProcessManager{ deathSig: &sync.Map{}, identifiers: &sync.Map{}, stdio: &sync.Map{}, } } // StartProcess manages a process in manager func (m *BackgroundProcessManager) StartProcess(cmd *ManagedProcess) (*process.Process, error) { var identifierLock *sync.Mutex if cmd.Identifier != nil { lock, _ := m.identifiers.LoadOrStore(*cmd.Identifier, &sync.Mutex{}) identifierLock = lock.(*sync.Mutex) identifierLock.Lock() } err := cmd.Start() if err != nil { log.Error(err, "fail to start process") return nil, err } pid := cmd.Process.Pid procState, err := process.NewProcess(int32(cmd.Process.Pid)) if err != nil { return nil, err } ct, err := procState.CreateTime() if err != nil { return nil, err } pair := ProcessPair{ Pid: pid, CreateTime: ct, } channel, _ := m.deathSig.LoadOrStore(pair, make(chan bool, 1)) deathChannel := channel.(chan bool) stdio := &Stdio{Locker: &sync.Mutex{}} if cmd.Stdin != nil { if stdin, ok := cmd.Stdin.(io.ReadWriteCloser); ok { stdio.Stdin = stdin } } if cmd.Stdout != nil { if stdout, ok := cmd.Stdout.(io.ReadWriteCloser); ok { stdio.Stdout = stdout } } if cmd.Stderr != nil { if stderr, ok := cmd.Stderr.(io.ReadWriteCloser); ok { stdio.Stderr = stderr } } m.stdio.Store(pair, stdio) log := log.WithValues("pid", pid) go func() { err := cmd.Wait() if err != nil { if exitErr, ok := err.(*exec.ExitError); ok { status := exitErr.Sys().(syscall.WaitStatus) if status.Signaled() && status.Signal() == syscall.SIGTERM { log.Info("process stopped with SIGTERM signal") } } else { log.Error(err, "process exited accidentally") } } log.Info("process stopped") deathChannel <- true m.deathSig.Delete(pair) if io, loaded := 
m.stdio.LoadAndDelete(pair); loaded { if stdio, ok := io.(*Stdio); ok { stdio.Lock() if stdio.Stdin != nil { if err = stdio.Stdin.Close(); err != nil { log.Error(err, "stdin fails to be closed") } } if stdio.Stdout != nil { if err = stdio.Stdout.Close(); err != nil { log.Error(err, "stdout fails to be closed") } } if stdio.Stderr != nil { if err = stdio.Stderr.Close(); err != nil { log.Error(err, "stderr fails to be closed") } } stdio.Unlock() } } if identifierLock != nil { identifierLock.Unlock() m.identifiers.Delete(*cmd.Identifier) } }() return procState, nil } // KillBackgroundProcess sends SIGTERM to process func (m *BackgroundProcessManager) KillBackgroundProcess(ctx context.Context, pid int, startTime int64) error { log := log.WithValues("pid", pid) p, err := os.FindProcess(int(pid)) if err != nil { log.Error(err, "unreachable path. `os.FindProcess` will never return an error on unix") return err } procState, err := process.NewProcess(int32(pid)) if err != nil { // return successfully as the process has exited return nil } ct, err := procState.CreateTime() if err != nil { log.Error(err, "fail to read create time") // return successfully as the process has exited return nil } // There is a bug in calculating CreateTime in the new version of // gopsutils. This is a temporary solution before the upstream fixes it. if startTime-ct > 1000 || ct-startTime > 1000 { log.Info("process has already been killed", "startTime", ct, "expectedStartTime", startTime) // return successfully as the process has exited return nil } ppid, err := procState.Ppid() if err != nil { log.Error(err, "fail to read parent id") // return successfully as the process has exited return nil } if ppid != int32(os.Getpid()) { log.Info("process has already been killed", "ppid", ppid) // return successfully as the process has exited return nil } err = p.Signal(syscall.SIGTERM) if err != nil && err.Error() != "os: process already finished" { log.Error(err, "error while killing process") return err } pair := ProcessPair{ Pid: pid, CreateTime: startTime, } channel, ok := m.deathSig.Load(pair) if ok { deathChannel := channel.(chan bool) select { case <-deathChannel: case <-ctx.Done(): return ctx.Err() } } log.Info("Successfully killed process") return nil } func (m *BackgroundProcessManager) Stdio(pid int, startTime int64) *Stdio { log := log.WithValues("pid", pid) procState, err := process.NewProcess(int32(pid)) if err != nil
ct, err := procState.CreateTime() if err != nil { log.Error(err, "fail to read create time") // return successfully as the process has exited return nil } // There is a bug in calculating CreateTime in the new version of // gopsutils. This is a temporary solution before the upstream fixes it. if startTime-ct > 1000 || ct-startTime > 1000 { log.Info("process has exited", "startTime", ct, "expectedStartTime", startTime) // return successfully as the process has exited return nil } pair := ProcessPair{ Pid: pid, CreateTime: startTime, } io, ok := m.stdio.Load(pair) if !ok { log.Info("fail to load with pair", "pair", pair) // stdio is not stored return nil } return io.(*Stdio) } // DefaultProcessBuilder returns the default process builder func DefaultProcessBuilder(cmd string, args ...string) *ProcessBuilder { return &ProcessBuilder{ cmd: cmd, args: args, nsOptions: []nsOption{}, pause: false, identifier: nil, ctx: context.Background(), } } // ProcessBuilder builds a exec.Cmd for daemon type ProcessBuilder struct { cmd string args []string env []string nsOptions []nsOption pause bool localMnt bool identifier *string stdin io.ReadWriteCloser stdout io.ReadWriteCloser stderr io.ReadWriteCloser ctx context.Context } // GetNsPath returns corresponding namespace path func GetNsPath(pid uint32, typ NsType) string { return fmt.Sprintf("%s/%d/ns/%s", DefaultProcPrefix, pid, string(typ)) } // SetEnv sets the environment variables of the process func (b *ProcessBuilder) SetEnv(key, value string) *ProcessBuilder { b.env = append(b.env, fmt.Sprintf("%s=%s", key, value)) return b } // SetNS sets the namespace of the process func (b *ProcessBuilder) SetNS(pid uint32, typ NsType) *ProcessBuilder { return b.SetNSOpt([]nsOption{{ Typ: typ, Path: GetNsPath(pid, typ), }}) } // SetNSOpt sets the namespace of the process func (b *ProcessBuilder) SetNSOpt(options []nsOption) *ProcessBuilder { b.nsOptions = append(b.nsOptions, options...) return b } // SetIdentifier sets the identifier of the process func (b *ProcessBuilder) SetIdentifier(id string) *ProcessBuilder { b.identifier = &id return b } // EnablePause enables pause for process func (b *ProcessBuilder) EnablePause() *ProcessBuilder { b.pause = true return b } func (b *ProcessBuilder) EnableLocalMnt() *ProcessBuilder { b.localMnt = true return b } // SetContext sets context for process func (b *ProcessBuilder) SetContext(ctx context.Context) *ProcessBuilder { b.ctx = ctx return b } // SetStdin sets stdin for process func (b *ProcessBuilder) SetStdin(stdin io.ReadWriteCloser) *ProcessBuilder { b.stdin = stdin return b } // SetStdout sets stdout for process func (b *ProcessBuilder) SetStdout(stdout io.ReadWriteCloser) *ProcessBuilder { b.stdout = stdout return b } // SetStderr sets stderr for process func (b *ProcessBuilder) SetStderr(stderr io.ReadWriteCloser) *ProcessBuilder { b.stderr = stderr return b } type nsOption struct { Typ NsType Path string } // ManagedProcess is a process which can be managed by backgroundProcessManager type ManagedProcess struct { *exec.Cmd // If the identifier is not nil, process manager should make sure no other // process with this identifier is running when executing this command Identifier *string }
{ log.Info("fail to get process information", "pid", pid) // return successfully as the process has exited return nil }
generate.go
package generate

import (
	"encoding/json"
	"errors"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"readyGo/box"
	"readyGo/mapping"
	"readyGo/scalar"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"text/template"

	"readyGo/helper"

	"gopkg.in/yaml.v2"
)

var (
	// ErrInvalidProjectName is returned for an invalid project name; a name may
	// contain only letters, with no special characters, whitespace or digits.
	ErrInvalidProjectName = errors.New("invalid project name; it must have only characters;no special chars,whitespaces,digits are allowed;")
	// ErrNoFile is returned when no file is provided.
	ErrNoFile = errors.New("no file provided")
	// ErrEmptyMapping is returned when the mapping is empty.
	ErrEmptyMapping = errors.New("invalid mapping;mapping cannot be empty")
	// ErrEmptyImplementer is returned when the implementer is nil.
	ErrEmptyImplementer = errors.New("invalid implementer;implementer cannot be nil")
	// ErrInvalidTemlateGenerator is returned when an invalid template generator is provided.
	ErrInvalidTemlateGenerator = errors.New("invalid template generator;try to instantiate it through generate.New function")
	// ErrInvalidRoot is returned for an invalid root directory.
	ErrInvalidRoot = errors.New("invalid root directory")
)

// New creates a new generator.
func
(file *string, scalar scalar.Map, implementer Implementer) (tg *Generate, err error) { if file == nil || *file == "" { return nil, ErrNoFile } if implementer == nil { return nil, ErrEmptyImplementer } ext := filepath.Ext(*file) if ext != ".json" && ext != ".yaml" && ext != ".yml" { return nil, errors.New("only json | yaml | yml files are allowed ") } cFile, err := ioutil.ReadFile(*file) if err != nil { return nil, err } if ext == ".json" { err = json.Unmarshal([]byte(cFile), &tg) if err != nil { return nil, err } } if ext == ".yaml" || ext == ".yml" { err = yaml.Unmarshal([]byte(cFile), &tg) if err != nil { return nil, err } } // Logic to identify mapping projectType := tg.APISpec.Kind + "_" + tg.DatabaseSpec.Name if tg.MessagingSpec.Name != "" { projectType = projectType + "_" + tg.MessagingSpec.Name } //ops := boxops.New("../box") ops := &box.Box{} mapping, err := mapping.New(ops, "configs/mappings/"+projectType+".json", projectType) if err != nil { log.Fatal(err) } tg.Mapping = mapping tg.Scalars = scalar tg.Implementer = implementer err = tg.ChangeIden() if err != nil { return nil, err } matched, err := regexp.MatchString("^[a-zA-Z]*$", tg.Project) if !matched { return nil, ErrInvalidProjectName } if err != nil { return nil, err } err = tg.SetFieldCategory() if err != nil { return tg, err } err = tg.Validate() if err != nil { return nil, err } // This channel can be used in such a way that all generated output can be sent to this so that I can be prined properly if tg.Output == nil { tg.Output = make(chan string) } return tg, nil } // CreateAll creates all kinds of files based on the provided mappings func (tg *Generate) CreateAll() (err error) { if tg == nil { return ErrInvalidTemlateGenerator } if tg.Project == "" { return ErrInvalidRoot } // Todo write more conditions here for _, opsData := range tg.Mapping.OpsData { //fmt.Println(opsData) switch opsData.OpType { case "directories": path := filepath.Join(tg.Project, opsData.Src) tg.Output <- "generating the following directory :" + path err = os.MkdirAll(path, os.ModePerm) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } tg.Output <- "the following directory has been generated :" + path case "static-files": dst := filepath.Join(tg.Project, opsData.Dst) tg.Output <- "generating the following static file :" + dst content, err := tg.Mapping.Reader.Read(opsData.Src) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } if content != "" { var li int cos := runtime.GOOS switch cos { case "windows": li = strings.LastIndex(dst, "\\") default: li = strings.LastIndex(dst, "/") } dirs := dst[0:li] err = os.MkdirAll(dirs, 0755) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } err = ioutil.WriteFile(dst, []byte(content), 0644) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." 
+ errRm.Error()) } return err } tg.Output <- "The following static file has been generated :" + dst } case "multiple-file-templates": mhandler := make(map[string]interface{}) mhandler["Project"] = tg.Project mhandler["config"] = tg if opsData.GenForType == "both" { for _, v := range tg.Models { mhandler["Model"] = v dst := filepath.Join(tg.Project, opsData.Dst) tg.Output <- "generating template based file :" + dst err = os.MkdirAll(dst, 0755) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } // If there is any extension in the opsData that means file to be created with the given extension. Otherwise create a default one with .go if opsData.Ext == "" { dst = path.Join(tg.Project, opsData.Dst, strings.ToLower(v.Name)+".go") } else { // If any extension starts with . add the extension as it is.Otherwise add . as a prefix to the opsData.Ext if string(strings.Trim(opsData.Ext, " ")[0]) == "." { dst = path.Join(tg.Project, opsData.Dst, strings.ToLower(v.Name)+opsData.Ext) } else { dst = path.Join(tg.Project, opsData.Dst, strings.ToLower(v.Name)+"."+opsData.Ext) } } content, err := tg.Mapping.Reader.Read(opsData.Src) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } if content != "" { err := tg.WriteTmplToFile(dst, content, mhandler) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } tg.Output <- "the following templated based file has been generated :" + dst } } } else if opsData.GenForType == "main" { for _, v := range tg.Models { if v.Type == "main" { mhandler["Model"] = v dst := filepath.Join(tg.Project, opsData.Dst) tg.Output <- "generating template based file :" + dst err = os.MkdirAll(dst, 0755) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } // If there is any extension in the opsData that means file to be created with the given extension. Otherwise create a default one with .go if opsData.Ext == "" { dst = path.Join(tg.Project, opsData.Dst, strings.ToLower(v.Name)+".go") } else { // If any extension starts with . add the extension as it is.Otherwise add . as a prefix to the opsData.Ext if string(strings.Trim(opsData.Ext, " ")[0]) == "." { dst = path.Join(tg.Project, opsData.Dst, strings.ToLower(v.Name)+opsData.Ext) } else { dst = path.Join(tg.Project, opsData.Dst, strings.ToLower(v.Name)+"."+opsData.Ext) } } content, err := tg.Mapping.Reader.Read(opsData.Src) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } if content != "" { err := tg.WriteTmplToFile(dst, content, mhandler) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } tg.Output <- "the following templated based file has been generated :" + dst } } } } else if opsData.GenForType == "sub" { for _, v := range tg.Models { if v.Type == "sub" { mhandler["Model"] = v dst := filepath.Join(tg.Project, opsData.Dst) tg.Output <- "generating template based file :" + dst err = os.MkdirAll(dst, 0755) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } // If there is any extension in the opsData that means file to be created with the given extension. 
Otherwise create a default one with .go if opsData.Ext == "" { dst = path.Join(tg.Project, opsData.Dst, strings.ToLower(v.Name)+".go") } else { // If any extension starts with . add the extension as it is.Otherwise add . as a prefix to the opsData.Ext if string(strings.Trim(opsData.Ext, " ")[0]) == "." { dst = path.Join(tg.Project, opsData.Dst, strings.ToLower(v.Name)+opsData.Ext) } else { dst = path.Join(tg.Project, opsData.Dst, strings.ToLower(v.Name)+"."+opsData.Ext) } } content, err := tg.Mapping.Reader.Read(opsData.Src) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } if content != "" { err := tg.WriteTmplToFile(dst, content, mhandler) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } tg.Output <- "the following templated based file has been generated :" + dst } } } } case "single-file-templates": // Todo for opsData.Ext if there is an extension mhandler := make(map[string]interface{}) mhandler["config"] = tg dst := path.Join(tg.Project, opsData.Dst) tg.Output <- "generating template based contents to the file :" + dst li := strings.LastIndex(dst, "/") dirs := dst[0:li] err = os.MkdirAll(dirs, 0755) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } content, err := tg.Mapping.Reader.Read(opsData.Src) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } if content != "" { err := tg.WriteTmplToFile(dst, content, mhandler) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } tg.Output <- "The following templated based file has been generated :" + dst } case "exec": // Todo for opsData.Ext if there is an extension mhandler := make(map[string]interface{}) mhandler["config"] = tg dst := path.Join(tg.Project, opsData.Dst) tg.Output <- "generating shall based executable files :" + dst li := strings.LastIndex(dst, "/") dirs := dst[0:li] err = os.MkdirAll(dirs, 0755) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } content, err := tg.Mapping.Reader.Read(opsData.Src) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." + errRm.Error()) } return err } if content != "" { err := tg.WriteTmplToFile(dst, content, mhandler) if err != nil { errRm := tg.RmDir() if errRm != nil { return errors.New("1." + err.Error() + ".2." 
+ errRm.Error()) } return err } tg.Output <- "The following shell file has been generated :" + dst err = os.Chmod(dst, 0700) if err != nil { return err } tg.Output <- "giving read|write|execute permissions to the file :" + dst } default: return errors.New(opsData.OpType + ":this type has no implementation") } } return nil } // RmDir is to remove dirs func (tg *Generate) RmDir() (err error) { tg.Output <- "removing all directories of the project :" + tg.Project err = os.RemoveAll(tg.Project) if err != nil { return err } tg.Output <- "all directories in the following project have been removed :" + tg.Project return nil } // WriteTmplToFile is to convert from template to a file func (tg *Generate) WriteTmplToFile(filePath string, tmpl string, data interface{}) (err error) { file, err := os.Create(filePath) if err != nil { return err } defer file.Close() t := template.Must(template.New("toFile").Funcs(template.FuncMap{ "ToLower": func(str string) string { return strings.ToLower(str) }, }).Funcs( template.FuncMap{ "Initial": func(str string) string { if len(str) > 0 { return string(strings.ToLower(str)[0]) } return "x" }, }).Funcs(template.FuncMap{ "Counter": func(str string) string { if s, err := strconv.Atoi(str); err == nil { count := s + 1 return strconv.Itoa(count) } return "0" }}).Funcs(template.FuncMap{ "GoType": func(tpe string) string { if scler := tg.Scalars.GetScalar(tpe); scler != nil { return scler.GoType } return "" }}).Funcs(template.FuncMap{ "GrpcType": func(tpe string) string { if scler := tg.Scalars.GetScalar(tpe); scler != nil { return scler.GrpcType } return "" }}).Funcs(template.FuncMap{ "GrpcArrayModel": func(tpe string) string { ss := strings.Split(tpe, "[]") if len(ss) > 1 { return ss[1] } return "" }}).Funcs(template.FuncMap{ "GoRegExFormat": func(str string) string { if str == "" { return "" } str = strings.Trim(str, " ") //strbuff := []byte(str) if len(str) > 2 { // strbuff[0] = 96 // strbuff[len(strbuff)-1] = 96 stroriginal := str str = strings.Replace(str[1:len(str)-1], "`", `"`+"`"+`"`, -1) return string(stroriginal[0]) + str + string(stroriginal[len(stroriginal)-1]) } return string(str) }}).Parse(tmpl)) err = t.Execute(file, data) if err != nil { return err } return nil } // ExecuteCommand runs the given file with /bin/sh and returns its standard output. func ExecuteCommand(filename string) (string, error) { cmd, err := exec.Command("/bin/sh", filename).Output() if err != nil { return "", err } output := string(cmd) return output, nil } // Execute executes given shell files func (tg *Generate) Execute() (err error) { if !helper.IsWindows() { if tg == nil { return ErrInvalidTemlateGenerator } if tg.Project == "" { return ErrInvalidRoot } for _, opsData := range tg.Mapping.OpsData { switch opsData.OpType { case "exec": // Todo for opsData.Ext if there is an extension tg.Output <- "executing the following file:" + opsData.Dst mhandler := make(map[string]interface{}) mhandler["config"] = tg dst := path.Join(tg.Project, opsData.Dst) output, err := ExecuteCommand(dst) if err != nil { return err } tg.Output <- "the following file has been executed:" + opsData.Dst tg.Output <- output // Sending output to the channel default: } } } return nil } // WriteOutput streams generator output from the Output channel to w; it is intended to be run as a goroutine. func (tg *Generate) WriteOutput(w io.Writer) { for output := range tg.Output { output = "\n" + output // Add a new line to the output _, err := w.Write([]byte(output)) if err != nil { log.Fatal(err) } } }
New
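The WriteTmplToFile method above chains several Funcs calls onto one text/template. A minimal standalone sketch of that pattern, with the helpers collapsed into a single FuncMap; the template body and data values here are made up for illustration:

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		"ToLower": strings.ToLower,
		// Initial returns the lowercased first letter, "x" for empty input.
		"Initial": func(s string) string {
			if len(s) > 0 {
				return string(strings.ToLower(s)[0])
			}
			return "x"
		},
	}
	// Funcs must be registered before Parse so the names resolve.
	t := template.Must(template.New("demo").Funcs(funcs).Parse(
		"package {{ToLower .Name}}\n// receiver: {{Initial .Name}}\n"))
	if err := t.Execute(os.Stdout, struct{ Name string }{"Employee"}); err != nil {
		panic(err)
	}
}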
TestImages.ts
module TestRuns { export function Te
{ function loadStream(name: string, cb: (bs: Data.ArrayBitStream) => void) { var oReq = new XMLHttpRequest(); oReq.open("GET", name, true); oReq.responseType = "arraybuffer"; oReq.onload = function (oEvent) { var arrayBuffer = oReq.response; // Note: not oReq.responseText cb(new Data.ArrayBitStream(<any>(new Uint8Array(arrayBuffer)))); }; oReq.send(null); } var managers: Managers.ManagerSet = null; var f = 'Data/SETMENU.LZS'; loadStream(f, function (bsIntro) { loadStream(f, function (bsAnim) { var parts = new Images.ImageSetLoader(managers).load(bsIntro); var anim = new Images.ImageSetLoader(managers).load(bsAnim); var canvas = document.createElement('canvas'); canvas.width = 320; canvas.height = 200; document.body.appendChild(canvas); var i = 0; var ctx: CanvasRenderingContext2D = canvas.getContext('2d'); function drawFrame() { ctx.fillStyle = '#000000'; ctx.fillRect(0, 0, canvas.width, canvas.height); document.title = 'IMG: ' + i; ctx.drawImage(parts[i].Canvas, parts[i].XOffset, parts[i].YOffset); requestAnimationFrame(drawFrame); } requestAnimationFrame(drawFrame); setInterval(function () { i = (i + 1) % parts.length; }, 1000); }); }); }; }
stAnims()
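The loadStream helper above pulls a binary asset into an ArrayBuffer before parsing. A rough Go analogue of the same load-then-parse flow; the URL below is a placeholder:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

// loadStream fetches a binary asset and returns the raw bytes,
// the equivalent of responseType = "arraybuffer" above.
func loadStream(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("GET %s: %s", url, resp.Status)
	}
	return io.ReadAll(resp.Body)
}

func main() {
	data, err := loadStream("http://localhost:8000/Data/SETMENU.LZS") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded", len(data), "bytes")
}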
generate_t_k_g_config_for_vsphere_parameters.go
// Code generated by go-swagger; DO NOT EDIT. package vsphere // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "io" "net/http" "github.com/go-openapi/errors" "github.com/go-openapi/runtime" "github.com/go-openapi/runtime/middleware" models "github.com/vmware-tanzu-private/core/pkg/v1/tkg/web/server/models" ) // NewGenerateTKGConfigForVsphereParams creates a new GenerateTKGConfigForVsphereParams object // no default values defined in spec. func NewGenerateTKGConfigForVsphereParams() GenerateTKGConfigForVsphereParams { return GenerateTKGConfigForVsphereParams{} } // GenerateTKGConfigForVsphereParams contains all the bound params for the generate t k g config for vsphere operation // typically these are obtained from a http.Request // // swagger:parameters generateTKGConfigForVsphere type GenerateTKGConfigForVsphereParams struct { // HTTP Request Object HTTPRequest *http.Request `json:"-"` /*params to generate tkg configuration for vsphere Required: true In: body */ Params *models.VsphereRegionalClusterParams } // BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface // for simple values it will use straight method calls. // // To ensure default values, the struct must have been initialized with NewGenerateTKGConfigForVsphereParams() beforehand. func (o *GenerateTKGConfigForVsphereParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { var res []error o.HTTPRequest = r if runtime.HasBody(r) { defer r.Body.Close() var body models.VsphereRegionalClusterParams if err := route.Consumer.Consume(r.Body, &body); err != nil { if err == io.EOF { res = append(res, errors.Required("params", "body")) } else { res = append(res, errors.NewParseError("params", "body", "", err)) } } else { // validate body object if err := body.Validate(route.Formats); err != nil { res = append(res, err) } if len(res) == 0 { o.Params = &body }
res = append(res, errors.Required("params", "body")) } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil }
} } else {
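The generated BindRequest above decodes a required JSON body and maps an empty body to a "required" validation error. A hand-rolled sketch of the same behavior using only the Go standard library; the Params type and its field are stand-ins for the swagger model:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

// Params stands in for models.VsphereRegionalClusterParams.
type Params struct {
	ClusterName string `json:"clusterName"`
}

func bindRequest(r *http.Request) (*Params, error) {
	defer r.Body.Close()
	var p Params
	if err := json.NewDecoder(r.Body).Decode(&p); err != nil {
		if errors.Is(err, io.EOF) {
			return nil, errors.New("params: body is required") // empty body
		}
		return nil, fmt.Errorf("params: parse error: %w", err)
	}
	return &p, nil
}

func main() {
	req := httptest.NewRequest(http.MethodPost, "/config", strings.NewReader(`{"clusterName":"dev"}`))
	p, err := bindRequest(req)
	fmt.Println(p, err)
}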
rollup.config.js
import typescript from 'rollup-plugin-typescript2' import commonjs from 'rollup-plugin-commonjs' import external from 'rollup-plugin-peer-deps-external' // import postcss from 'rollup-plugin-postcss-modules' import postcss from 'rollup-plugin-postcss' import resolve from 'rollup-plugin-node-resolve' import url from 'rollup-plugin-url' import svgr from '@svgr/rollup' import pkg from './package.json' export default { input: 'src/index.tsx', output: [ { file: pkg.main, format: 'cjs', exports: 'named', sourcemap: true }, { file: pkg.module, format: 'es', exports: 'named', sourcemap: true } ], plugins: [ external(), postcss(), url(), svgr(), resolve(),
commonjs() ] }
typescript({ rollupCommonJSResolveHack: true, clean: true }),
app.e2e-spec.ts
import { AppPage } from './app.po'; import { browser, logging } from 'protractor'; describe('workspace-project App', () => { let page: AppPage; beforeEach(() => { page = new AppPage(); }); it('should display welcome message', () => { page.navigateTo(); expect(page.getTitleText()).toEqual('appp app is running!');
// Assert that there are no errors emitted from the browser const logs = await browser.manage().logs().get(logging.Type.BROWSER); expect(logs).not.toContain(jasmine.objectContaining({ level: logging.Level.SEVERE, } as logging.Entry)); }); });
}); afterEach(async () => {
federation.go
package federation import ( "bytes" "fmt" "io/ioutil" "os" "path/filepath" "strings" "github.com/99designs/gqlgen/codegen" "github.com/99designs/gqlgen/codegen/config" "github.com/99designs/gqlgen/codegen/templates" "github.com/99designs/gqlgen/plugin" "github.com/vektah/gqlparser" "github.com/vektah/gqlparser/ast" "github.com/vektah/gqlparser/formatter" ) type federation struct { SDL string Entities []*Entity } // New returns a federation plugin that injects // federated directives and types into the schema func New() plugin.Plugin { return &federation{} } // Name returns the plugin name func (f *federation) Name() string { return "federation" } // MutateConfig mutates the configuration func (f *federation) MutateConfig(cfg *config.Config) error { entityFields := map[string]config.TypeMapField{} for _, e := range f.Entities { entityFields[e.ResolverName] = config.TypeMapField{Resolver: true} for _, r := range e.Requires { if cfg.Models[e.Def.Name].Fields == nil { model := cfg.Models[e.Def.Name] model.Fields = map[string]config.TypeMapField{} cfg.Models[e.Def.Name] = model } cfg.Models[e.Def.Name].Fields[r.Name] = config.TypeMapField{Resolver: true} } } builtins := config.TypeMap{ "_Service": { Model: config.StringList{ "github.com/99designs/gqlgen/plugin/federation.Service", }, }, "_Any": { Model: config.StringList{"github.com/99designs/gqlgen/graphql.Map"}, }, } if len(entityFields) > 0 { builtins["Entity"] = config.TypeMapEntry{ Fields: entityFields, } } for typeName, entry := range builtins { if cfg.Models.Exists(typeName) { return fmt.Errorf("%v already exists which must be reserved when Federation is enabled", typeName) } cfg.Models[typeName] = entry } cfg.Directives["external"] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["requires"] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["provides"] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["key"] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["extends"] = config.DirectiveConfig{SkipRuntime: true} return nil } // InjectSources creates a GraphQL Entity type with all // the fields that had the @key directive func (f *federation) InjectSources(cfg *config.Config) { cfg.AdditionalSources = append(cfg.AdditionalSources, f.getSource(false)) f.setEntities(cfg) if len(f.Entities) == 0 { // It's unusual for a service not to have any entities, but // possible if it only exports top-level queries and mutations. return } s := "type Entity {\n" for _, e := range f.Entities { s += fmt.Sprintf("\t%s(%s: %s): %s!\n", e.ResolverName, e.Field.Name, e.Field.Type.String(), e.Def.Name) } s += "}" cfg.AdditionalSources = append(cfg.AdditionalSources, &ast.Source{Name: "entity.graphql", Input: s, BuiltIn: true}) } // addEntityToSchema adds the _Entity Union and _entities query to schema. // This is part of MutateSchema. 
func (f *federation) addEntityToSchema(s *ast.Schema) { // --- Set _Entity Union --- union := &ast.Definition{ Name: "_Entity", Kind: ast.Union, Description: "A union unifies all @entity types (TODO: interfaces)", Types: []string{}, } for _, ent := range f.Entities { union.Types = append(union.Types, ent.Def.Name) s.AddPossibleType("_Entity", ent.Def) s.AddImplements(ent.Def.Name, union) } s.Types[union.Name] = union // --- Set _entities query --- fieldDef := &ast.FieldDefinition{ Name: "_entities", Type: ast.NonNullListType(ast.NamedType("_Entity", nil), nil), Arguments: ast.ArgumentDefinitionList{ { Name: "representations", Type: ast.NonNullListType(ast.NonNullNamedType("_Any", nil), nil), }, }, } if s.Query == nil { s.Query = &ast.Definition{ Kind: ast.Object, Name: "Query", } s.Types["Query"] = s.Query } s.Query.Fields = append(s.Query.Fields, fieldDef) } // addServiceToSchema adds the _Service type and _service query to schema. // This is part of MutateSchema. func (f *federation) addServiceToSchema(s *ast.Schema) { typeDef := &ast.Definition{ Kind: ast.Object, Name: "_Service", Fields: ast.FieldList{ &ast.FieldDefinition{ Name: "sdl", Type: ast.NonNullNamedType("String", nil), }, }, } s.Types[typeDef.Name] = typeDef // --- set _service query --- _serviceDef := &ast.FieldDefinition{ Name: "_service", Type: ast.NonNullNamedType("_Service", nil), } s.Query.Fields = append(s.Query.Fields, _serviceDef) } // MutateSchema creates types and query declarations // that are required by the federation spec. func (f *federation) MutateSchema(s *ast.Schema) error { // It's unusual for a service not to have any entities, but // possible if it only exports top-level queries and mutations. if len(f.Entities) > 0 { f.addEntityToSchema(s) } f.addServiceToSchema(s) return nil } func (f *federation) getSource(builtin bool) *ast.Source { return &ast.Source{ Name: "federation.graphql", Input: `# Declarations as required by the federation spec # See: https://www.apollographql.com/docs/apollo-server/federation/federation-spec/ scalar _Any scalar _FieldSet directive @external on FIELD_DEFINITION directive @requires(fields: _FieldSet!) on FIELD_DEFINITION directive @provides(fields: _FieldSet!) on FIELD_DEFINITION directive @key(fields: _FieldSet!) on OBJECT | INTERFACE directive @extends on OBJECT `, BuiltIn: builtin, } } // Entity represents a federated type // that was declared in the GQL schema. 
type Entity struct { Field *ast.FieldDefinition FieldTypeGo string // The Go representation of that field type ResolverName string // The resolver name, such as FindUserByID Def *ast.Definition Requires []*Requires } // Requires represents an @requires clause type Requires struct { Name string // the name of the field Fields []*RequireField // the name of the sibling fields } // RequireField is similar to an entity but it is a field not // an object type RequireField struct { Name string // The same name as the type declaration NameGo string // The Go struct field name TypeReference *config.TypeReference // The Go representation of that field type } func (f *federation) GenerateCode(data *codegen.Data) error { sdl, err := f.getSDL(data.Config) if err != nil { return err } f.SDL = sdl if len(f.Entities) > 0 { data.Objects.ByName("Entity").Root = true for _, e := range f.Entities { obj := data.Objects.ByName(e.Def.Name) for _, f := range obj.Fields { if f.Name == e.Field.Name { e.FieldTypeGo = f.TypeReference.GO.String() } for _, r := range e.Requires { for _, rf := range r.Fields { if rf.Name == f.Name { rf.TypeReference = f.TypeReference rf.NameGo = f.GoFieldName } } } } } } return templates.Render(templates.Options{ PackageName: data.Config.Exec.Package, Filename: "service.go", Data: f, GeneratedHeader: true, }) } func (f *federation) setEntities(cfg *config.Config) { schema, err := cfg.LoadSchema() if err != nil { panic(err) } for _, schemaType := range schema.Types { if schemaType.Kind == ast.Object {
if dir != nil { fieldName := dir.Arguments[0].Value.Raw // TODO: multiple arguments, and multiple keys if strings.Contains(fieldName, " ") { panic("only single fields are currently supported in @key declaration") } field := schemaType.Fields.ForName(fieldName) requires := []*Requires{} for _, f := range schemaType.Fields { dir := f.Directives.ForName("requires") if dir == nil { continue } fields := strings.Split(dir.Arguments[0].Value.Raw, " ") requireFields := []*RequireField{} for _, f := range fields { requireFields = append(requireFields, &RequireField{ Name: f, }) } requires = append(requires, &Requires{ Name: f.Name, Fields: requireFields, }) } f.Entities = append(f.Entities, &Entity{ Field: field, Def: schemaType, ResolverName: fmt.Sprintf("find%sBy%s", schemaType.Name, templates.ToGo(fieldName)), Requires: requires, }) } } } } func (f *federation) getSDL(c *config.Config) (string, error) { sources := []*ast.Source{f.getSource(true)} for _, filename := range c.SchemaFilename { filename = filepath.ToSlash(filename) var err error var schemaRaw []byte schemaRaw, err = ioutil.ReadFile(filename) if err != nil { fmt.Fprintln(os.Stderr, "unable to open schema: "+err.Error()) os.Exit(1) } sources = append(sources, &ast.Source{Name: filename, Input: string(schemaRaw)}) } schema, err := gqlparser.LoadSchema(sources...) if err != nil { return "", err } var buf bytes.Buffer formatter.NewFormatter(&buf).FormatSchema(schema) return buf.String(), nil } // Service is the service object that the // generated.go file will return for the _service // query type Service struct { SDL string `json:"sdl"` }
dir := schemaType.Directives.ForName("key") // TODO: interfaces
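InjectSources above assembles an Entity type with one resolver field per @key type. A standalone sketch of that string building; the entity names and key fields are invented for illustration:

package main

import "fmt"

type entity struct {
	ResolverName, FieldName, FieldType, TypeName string
}

// entitySDL mirrors the loop in InjectSources: one resolver field per entity.
func entitySDL(entities []entity) string {
	s := "type Entity {\n"
	for _, e := range entities {
		s += fmt.Sprintf("\t%s(%s: %s): %s!\n", e.ResolverName, e.FieldName, e.FieldType, e.TypeName)
	}
	return s + "}"
}

func main() {
	fmt.Println(entitySDL([]entity{
		{"findUserByID", "id", "ID!", "User"},
		{"findProductByUPC", "upc", "String!", "Product"},
	}))
}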
fileutil.py
"""File utility functions for Sphinx.""" import os import posixpath from typing import TYPE_CHECKING, Callable, Dict from docutils.utils import relative_path from sphinx.util.osutil import copyfile, ensuredir from sphinx.util.typing import PathMatcher if TYPE_CHECKING: from sphinx.util.template import BaseRenderer def copy_asset_file(source: str, destination: str, context: Dict = None, renderer: "BaseRenderer" = None) -> None: """Copy an asset file to destination. On copying, it expands the template variables if context argument is given and the asset is a template file. :param source: The path to source file :param destination: The path to destination file or directory :param context: The template variables. If not given, template files are simply copied :param renderer: The template engine. If not given, SphinxRenderer is used by default """ if not os.path.exists(source): return if os.path.isdir(destination): # Use source filename if destination points a directory destination = os.path.join(destination, os.path.basename(source)) if source.lower().endswith('_t') and context is not None: if renderer is None: from sphinx.util.template import SphinxRenderer renderer = SphinxRenderer() with open(source, encoding='utf-8') as fsrc: if destination.lower().endswith('_t'): destination = destination[:-2] with open(destination, 'w', encoding='utf-8') as fdst: fdst.write(renderer.render_string(fsrc.read(), context)) else: copyfile(source, destination) def copy_asset(source: str, destination: str, excluded: PathMatcher = lambda path: False, context: Dict = None, renderer: "BaseRenderer" = None, onerror: Callable[[str, Exception], None] = None) -> None: """Copy asset files to destination recursively. On copying, it expands the template variables if context argument is given and the asset is a template file. :param source: The path to source file or directory :param destination: The path to destination directory :param excluded: The matcher to determine the given path should be copied or not :param context: The template variables. If not given, template files are simply copied :param renderer: The template engine. If not given, SphinxRenderer is used by default :param onerror: The error handler. """ if not os.path.exists(source):
if renderer is None: from sphinx.util.template import SphinxRenderer renderer = SphinxRenderer() ensuredir(destination) if os.path.isfile(source): copy_asset_file(source, destination, context, renderer) return for root, dirs, files in os.walk(source, followlinks=True): reldir = relative_path(source, root) for dir in dirs[:]: if excluded(posixpath.join(reldir, dir)): dirs.remove(dir) else: ensuredir(posixpath.join(destination, reldir, dir)) for filename in files: if not excluded(posixpath.join(reldir, filename)): try: copy_asset_file(posixpath.join(root, filename), posixpath.join(destination, reldir), context, renderer) except Exception as exc: if onerror: onerror(posixpath.join(root, filename), exc) else: raise
return
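copy_asset above walks the source tree and prunes directories the matcher rejects. Roughly the same walk in Go with filepath.WalkDir; template expansion is omitted and the exclusion rule is hypothetical:

package main

import (
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
)

func copyAssets(src, dst string, excluded func(rel string) bool) error {
	return filepath.WalkDir(src, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(src, p)
		if err != nil {
			return err
		}
		if excluded(rel) {
			if d.IsDir() {
				return fs.SkipDir // prune the whole subtree, like dirs.remove(dir)
			}
			return nil
		}
		target := filepath.Join(dst, rel)
		if d.IsDir() {
			return os.MkdirAll(target, 0o755)
		}
		in, err := os.Open(p)
		if err != nil {
			return err
		}
		defer in.Close()
		out, err := os.Create(target)
		if err != nil {
			return err
		}
		defer out.Close()
		_, err = io.Copy(out, in)
		return err
	})
}

func main() {
	err := copyAssets("static", "_build/static", func(rel string) bool {
		return filepath.Ext(rel) == ".tmp" // hypothetical exclusion rule
	})
	if err != nil {
		fmt.Println("copy failed:", err)
	}
}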
testclasses.ts
import { TEST } from ".."; export const serializers = { ClassWithDecorator: { pack: value => null, unpack: value => new ClassWithDecorator() } }; export class
{ @TEST(__filename, { disabled: false }) name() { return "name"; } }
ClassWithDecorator
promise-deferred-rejection.js
/** * Promise with deferred rejection. * @module */ /** Private data repository. */ const privr = new WeakMap; /** * Internal promise was rejected. * If deferral has already ended, deliver immediately. * Otherwise, save rejection reason and defer for later delivery. */ function onRejected (reason) { const priv = privr.get(this); if (!priv.defer) return priv.reject(reason); priv.deferred = true; priv.reason = reason; } /** Promise with deferred rejection. */ class PromiseDeferredRejection extends Promise { /** * Creates internal promise for complete standard functionality.
*/ constructor (executor) { const priv = { defer: true, deferred: false, reason: undefined, reject: null }; let resolve; super((res,rej)=>{ resolve = res; priv.reject = rej; }); privr.set(this,priv); let internalResolve, internalReject; const internalPromise = new Promise((res,rej)=>{ internalResolve = res; internalReject = rej; }); internalPromise.then( resolve, onRejected.bind(this) ); executor(internalResolve,internalReject); } /** Ends deferral on first rejection reaction. */ then (onFulfilled, onRejected) { const priv = privr.get(this); if (!priv.defer) return super.then (onFulfilled, onRejected); const result = super.then (onFulfilled, onRejected); if (typeof onRejected === 'function') this.release(); return result; } /** Ends deferral. Delivers deferred rejection if pending. */ release () { const priv = privr.get(this); if (!priv.defer) return; priv.defer = false; if (priv.deferred) { const reason = priv.reason; priv.deferred = false; priv.reason = undefined; priv.reject(reason); } } /** Derive standard promises. Too easy to swallow rejections otherwise. */ static get [Symbol.species] () { return Promise; } } /** Batch of promises with centralized deferral ending. */ class PromiseDeferredRejectionBatch { constructor () { const priv = { promises: new Set, defer: true }; privr.set(this,priv); /** Batch subclass. Ensures all derived promises are in the batch. */ priv.constructor = class extends PromiseDeferredRejection { constructor (executor) { super(executor); if(priv.defer) priv.promises.add(this); else this.release(); } static get [Symbol.species] () { return priv.constructor; } } } create (executor) { const priv = privr.get(this); return new priv.constructor(executor); } release () { const priv = privr.get(this); if (!priv.defer) return; priv.defer = false; const promises = priv.promises; { let promise; for (promise of promises) { promise.release(); }} priv.promises = null; } } module.exports = { PromiseDeferredRejection, PromiseDeferredRejectionBatch };
* Passes through internal promise fulfillment unconditionally. * Intercepts internal promise rejection and defers or delivers conditionally.
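A loose Go analogue of the deferral idea above: a failure is parked until a consumer attaches a handler, rather than being delivered the moment it happens. The type and method names are invented:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// DeferredErr parks a failure until a consumer shows interest.
type DeferredErr struct {
	mu      sync.Mutex
	handler func(error) // set once a consumer attaches
	pending error       // failure recorded while still deferring
}

// Fail delivers the error if someone is listening, otherwise parks it
// (the counterpart of onRejected above).
func (d *DeferredErr) Fail(err error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.handler != nil {
		d.handler(err)
		return
	}
	d.pending = err
}

// Then attaches a consumer and flushes any parked failure, the way the
// first rejection reaction ends the deferral above.
func (d *DeferredErr) Then(h func(error)) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.handler = h
	if d.pending != nil {
		h(d.pending)
		d.pending = nil
	}
}

func main() {
	var d DeferredErr
	d.Fail(errors.New("boom")) // parked: no consumer yet
	d.Then(func(err error) { fmt.Println("handled:", err) })
}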
FinancialAdvisorDemoAlgorithm.py
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals. # Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from clr import AddReference AddReference("System") AddReference("QuantConnect.Algorithm") AddReference("QuantConnect.Common") from System import * from QuantConnect import * from QuantConnect.Algorithm import * ### <summary> ### This algorithm demonstrates how to submit orders to a Financial Advisor account group, allocation profile or a single managed account. ### </summary> ### <meta name="tag" content="using data" /> ### <meta name="tag" content="using quantconnect" /> ### <meta name="tag" content="trading and orders" /> ### <meta name="tag" content="financial advisor" /> class Fi
CAlgorithm): def Initialize(self): # Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized. self.SetStartDate(2013,10,7) #Set Start Date self.SetEndDate(2013,10,11) #Set End Date self.SetCash(100000) #Set Strategy Cash self.symbol = self.AddEquity("SPY", Resolution.Second).Symbol # The default order properties can be set here to choose the FA settings # to be automatically used in any order submission method (such as SetHoldings, Buy, Sell and Order) # Use a default FA Account Group with an Allocation Method self.DefaultOrderProperties = InteractiveBrokersOrderProperties() # account group created manually in IB/TWS self.DefaultOrderProperties.FaGroup = "TestGroupEQ" # supported allocation methods are: EqualQuantity, NetLiq, AvailableEquity, PctChange self.DefaultOrderProperties.FaMethod = "EqualQuantity" # set a default FA Allocation Profile # DefaultOrderProperties = InteractiveBrokersOrderProperties() # allocation profile created manually in IB/TWS # self.DefaultOrderProperties.FaProfile = "TestProfileP" # send all orders to a single managed account # DefaultOrderProperties = InteractiveBrokersOrderProperties() # a sub-account linked to the Financial Advisor master account # self.DefaultOrderProperties.Account = "DU123456" def OnData(self, data): # OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here. if not self.Portfolio.Invested: # when logged into IB as a Financial Advisor, this call will use order properties # set in the DefaultOrderProperties property of QCAlgorithm self.SetHoldings("SPY", 1)
nancialAdvisorDemoAlgorithm(Q
service.rs
use crate::logger::message; use anyhow::Result; use indoc::indoc; use inflector::Inflector; use std::path::PathBuf; struct Service { pub config: ServiceConfig, pub file_contents: String, } struct ServiceConfig { pub model_name: String, pub file_name: String, } pub fn create(resource_name: &str, base_endpoint_path: &str) -> Result<()> { let resource = generate(resource_name); crate::fs::add_rust_file( "backend/services", resource.config.file_name.as_str(), resource.file_contents.as_str(), )?; crate::service::register_service(resource.config.file_name.as_str(), base_endpoint_path)?; Ok(()) } fn config(service_name: &str) -> ServiceConfig { let model_name = service_name.to_pascal_case(); let file_name = model_name.to_snake_case(); return ServiceConfig { model_name: model_name, file_name: file_name, }; } fn generate(service_name: &str) -> Service { let config = config(service_name); let contents_template: &str = indoc! {" use crate::models::$FILE_NAME::{$MODEL_NAME, $MODEL_NAMEChangeset}; use crate::models::{ID, PaginationParams}; use crate::Pool; use actix_web::{delete, get, post, put, Error as AWError}; use actix_web::{web, HttpResponse}; #[get(\"\")] async fn index( pool: web::Data<Pool>, web::Query(info): web::Query<PaginationParams> ) -> Result<HttpResponse, AWError> { let db = pool.get().unwrap(); Ok($MODEL_NAME::read_all(&db, &info) .map(|items| HttpResponse::Ok().json(items)) .map_err(|_| HttpResponse::InternalServerError())?) } #[get(\"/{id}\")] async fn read( pool: web::Data<Pool>, web::Path(item_id): web::Path<ID> ) -> Result<HttpResponse, AWError> { let db = pool.get().unwrap(); Ok($MODEL_NAME::read(&db, item_id) .map(|item| HttpResponse::Found().json(item)) .map_err(|_| HttpResponse::NotFound())?) } #[post(\"\")] async fn create( pool: web::Data<Pool>, web::Json(item): web::Json<$MODEL_NAMEChangeset> ) -> Result<HttpResponse, AWError> { let db = pool.get().unwrap(); Ok($MODEL_NAME::create(&db, &item) .map(|item| HttpResponse::Created().json(item)) .map_err(|_| HttpResponse::InternalServerError())?) } #[put(\"/{id}\")] async fn update( pool: web::Data<Pool>, web::Path(item_id): web::Path<ID>, web::Json(item): web::Json<$MODEL_NAMEChangeset> ) -> Result<HttpResponse, AWError> { let db = pool.get().unwrap(); Ok($MODEL_NAME::update(&db, item_id, &item) .map(|item| HttpResponse::Ok().json(item)) .map_err(|_| HttpResponse::InternalServerError())?) } #[delete(\"/{id}\")] async fn destroy( pool: web::Data<Pool>, web::Path(item_id): web::Path<ID>, ) -> Result<HttpResponse, AWError> { let db = pool.get().unwrap(); Ok($MODEL_NAME::delete(&db, item_id) .map(|_| HttpResponse::Ok().finish()) .map_err(|_| HttpResponse::InternalServerError().finish())?) } pub fn endpoints(scope: actix_web::Scope) -> actix_web::Scope { return scope .service(index) .service(read) .service(create) .service(update) .service(destroy); } "}; let contents = String::from(contents_template) .replace("$MODEL_NAME", config.model_name.as_str()) .replace("$FILE_NAME", config.file_name.as_str()); Service { config: config, file_contents: contents, } } pub fn register_service(service_file_name: &str, service_base_endpoint_path: &str) -> Result<()> { message(&format!("Registering service {}", service_file_name)); let main_file_path = PathBuf::from("backend/main.rs"); if main_file_path.exists() && main_file_path.is_file()
Ok(()) }
{ let mut main_file_contents = std::fs::read_to_string(&main_file_path)?; main_file_contents = main_file_contents.replace("web::scope(\"/api\")", &format!("web::scope(\"/api\")\n .service(services::{}::endpoints(web::scope(\"{}\")))", service_file_name, service_base_endpoint_path)); std::fs::write(main_file_path, main_file_contents)?; }
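The Rust generate function above fills $MODEL_NAME/$FILE_NAME placeholders with two chained replace calls. The same move in Go, where strings.NewReplacer applies all substitutions in a single pass, so one replacement cannot accidentally introduce another's pattern; the template and names are illustrative:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const tmpl = `use crate::models::$FILE_NAME::$MODEL_NAME;
// handlers for $MODEL_NAME go here
`
	// NewReplacer applies every pair in one pass over the input.
	out := strings.NewReplacer(
		"$MODEL_NAME", "Invoice",
		"$FILE_NAME", "invoice",
	).Replace(tmpl)
	fmt.Print(out)
}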
test_abstract_numbers.py
"""Unit tests for numbers.py.""" import math import unittest from numbers import Complex, Real, Rational, Integral from test import test_support class TestNumbers(unittest.TestCase): def test_int(self): self.assertTrue(issubclass(int, Integral)) self.assertTrue(issubclass(int, Complex)) self.assertEqual(7, int(7).real) self.assertEqual(0, int(7).imag) self.assertEqual(7, int(7).conjugate()) self.assertEqual(7, int(7).numerator) self.assertEqual(1, int(7).denominator) def test_long(self): self.assertTrue(issubclass(long, Integral)) self.assertTrue(issubclass(long, Complex)) self.assertEqual(7, long(7).real) self.assertEqual(0, long(7).imag) self.assertEqual(7, long(7).conjugate()) self.assertEqual(7, long(7).numerator) self.assertEqual(1, long(7).denominator) def test_float(self): self.assertFalse(issubclass(float, Rational)) self.assertTrue(issubclass(float, Real)) self.assertEqual(7.3, float(7.3).real) self.assertEqual(0, float(7.3).imag) self.assertEqual(7.3, float(7.3).conjugate()) def test_complex(self): self.assertFalse(issubclass(complex, Real)) self.assertTrue(issubclass(complex, Complex)) c1, c2 = complex(3, 2), complex(4,1) # XXX: This is not ideal, but see the comment in math_trunc(). self.assertRaises(AttributeError, math.trunc, c1) self.assertRaises(TypeError, float, c1) self.assertRaises(TypeError, int, c1) def test_main(): test_support.run_unittest(TestNumbers) if __name__ == "__main__":
unittest.main()
initca.go
// Package initca contains code to initialise a certificate authority, // generating a new root key and certificate. package initca import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/rsa" "crypto/x509" "encoding/pem" "errors" "io/ioutil" "time" "github.com/FiloSottile/BERserk/_vendor/github.com/cloudflare/cfssl/config" "github.com/FiloSottile/BERserk/_vendor/github.com/cloudflare/cfssl/csr" cferr "github.com/FiloSottile/BERserk/_vendor/github.com/cloudflare/cfssl/errors" "github.com/FiloSottile/BERserk/_vendor/github.com/cloudflare/cfssl/helpers" "github.com/FiloSottile/BERserk/_vendor/github.com/cloudflare/cfssl/log" "github.com/FiloSottile/BERserk/_vendor/github.com/cloudflare/cfssl/signer" "github.com/FiloSottile/BERserk/_vendor/github.com/cloudflare/cfssl/signer/local" ) // validator contains the default validation logic for certificate // requests to the API server. This follows the Baseline Requirements // for the Issuance and Management of Publicly-Trusted Certificates, // v.1.1.6, from the CA/Browser Forum // (https://cabforum.org). Specifically, section 10.2.3 ("Information // Requirements"), states: // // "Applicant information MUST include, but not be limited to, at least one // Fully-Qualified Domain Name or IP address to be included in the Certificate’s // SubjectAltName extension." func validator(req *csr.CertificateRequest) error { if len(req.Hosts) == 0 { return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing hosts field")) } return nil } // New creates a new root certificate from the certificate request. func New(req *csr.CertificateRequest) (cert, key []byte, err error) {
// NewFromPEM creates a new root certificate from the key file passed in. func NewFromPEM(req *csr.CertificateRequest, keyFile string) (cert []byte, err error) { if req.CA != nil { if req.CA.Expiry != "" { CAPolicy.Default.ExpiryString = req.CA.Expiry CAPolicy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry) } if req.CA.PathLength != 0 { signer.MaxPathLen = req.CA.PathLength } } privData, err := ioutil.ReadFile(keyFile) if err != nil { return nil, err } priv, err := helpers.ParsePrivateKeyPEM(privData) if err != nil { return nil, err } var sigAlgo x509.SignatureAlgorithm switch priv := priv.(type) { case *rsa.PrivateKey: bitLength := priv.PublicKey.N.BitLen() switch { case bitLength >= 4096: sigAlgo = x509.SHA512WithRSA case bitLength >= 3072: sigAlgo = x509.SHA384WithRSA case bitLength >= 2048: sigAlgo = x509.SHA256WithRSA default: sigAlgo = x509.SHA1WithRSA } case *ecdsa.PrivateKey: switch priv.Curve { case elliptic.P521(): sigAlgo = x509.ECDSAWithSHA512 case elliptic.P384(): sigAlgo = x509.ECDSAWithSHA384 case elliptic.P256(): sigAlgo = x509.ECDSAWithSHA256 default: sigAlgo = x509.ECDSAWithSHA1 } default: sigAlgo = x509.UnknownSignatureAlgorithm } var tpl = x509.CertificateRequest{ Subject: req.Name(), SignatureAlgorithm: sigAlgo, DNSNames: req.Hosts, } certReq, err := x509.CreateCertificateRequest(rand.Reader, &tpl, priv) if err != nil { log.Errorf("failed to generate a CSR: %v", err) // The use of CertificateError was a matter of some // debate; it is the one edge case in which a new // error category specifically for CSRs might be // useful, but it was deemed that one edge case did // not a new category justify. err = cferr.Wrap(cferr.CertificateError, cferr.BadRequest, err) return } p := &pem.Block{ Type: "CERTIFICATE REQUEST", Bytes: certReq, } certReq = pem.EncodeToMemory(p) s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil) if err != nil { log.Errorf("failed to create signer: %v", err) return } s.SetPolicy(CAPolicy) signReq := signer.SignRequest{Request: string(certReq)} cert, err = s.Sign(signReq) return } // CAPolicy contains the CA issuing policy as default policy. var CAPolicy = &config.Signing{ Default: &config.SigningProfile{ Usage: []string{"cert sign", "crl sign"}, ExpiryString: "43800h", Expiry: 5 * helpers.OneYear, CA: true, }, }
if req.CA != nil { if req.CA.Expiry != "" { CAPolicy.Default.ExpiryString = req.CA.Expiry CAPolicy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry) } if req.CA.PathLength != 0 { signer.MaxPathLen = req.CA.PathLength } } g := &csr.Generator{Validator: validator} csr, key, err := g.ProcessRequest(req) if err != nil { log.Errorf("failed to process request: %v", err) key = nil return } priv, err := helpers.ParsePrivateKeyPEM(key) if err != nil { log.Errorf("failed to parse private key: %v", err) return } s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil) if err != nil { log.Errorf("failed to create signer: %v", err) return } s.SetPolicy(CAPolicy) signReq := signer.SignRequest{Request: string(csr)} cert, err = s.Sign(signReq) return }
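Conceptually, New above boils down to: generate a key, then self-sign a certificate whose usage is cert-sign/CRL-sign. A reduced sketch with only the Go standard library; the subject and lifetime are placeholders (the 5-year NotAfter mirrors the 43800h default in CAPolicy), and a production CA needs far more care:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"os"
	"time"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tpl := x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "example test CA"}, // placeholder subject
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(5 * 365 * 24 * time.Hour), // ~43800h, as in CAPolicy
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	// Template == parent makes the certificate self-signed.
	der, err := x509.CreateCertificate(rand.Reader, &tpl, &tpl, &priv.PublicKey, priv)
	if err != nil {
		panic(err)
	}
	if err := pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
		panic(err)
	}
}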
config.rs
//! Provides access to the symbolserver config use std::env; use std::fs; use std::path::{Path, PathBuf}; use std::borrow::Cow; use std::io::BufReader; use num_cpus; use serde_yaml; use url::Url; use rusoto::Region; use chrono::Duration; use log::LogLevelFilter; use super::{Result, ResultExt, ErrorKind}; use super::utils::{is_docker, IgnorePatterns}; #[derive(Deserialize, Debug, Default, Clone)] struct AwsConfig { access_key: Option<String>, secret_key: Option<String>, bucket_url: Option<String>, region: Option<String>, } #[derive(Deserialize, Debug, Default, Clone)] struct ServerConfig { host: Option<String>, port: Option<u16>, healthcheck_interval: Option<i64>, threads: Option<usize>, } #[derive(Deserialize, Debug, Default, Clone)] struct LogConfig { level: Option<String>, file: Option<PathBuf>, } #[derive(Deserialize, Debug, Default, Clone)] struct SyncConfig { #[serde(default)] ignore: IgnorePatterns, interval: Option<i64>, } /// Central config object that exposes the information from /// the symbolserver yaml config. #[derive(Deserialize, Debug, Default, Clone)] pub struct Config { #[serde(default)] aws: AwsConfig, #[serde(default)] server: ServerConfig, #[serde(default)] log: LogConfig, symbol_dir: Option<PathBuf>, #[serde(default)] sync: SyncConfig, } impl Config { /// Loads a config from a given file pub fn load_file<P: AsRef<Path>>(path: P) -> Result<Config> { let f = fs::File::open(path)?; serde_yaml::from_reader(BufReader::new(f)).map_err(|err| { ErrorKind::ConfigError(err).into() }) } /// Loads a config from the default location pub fn load_default() -> Result<Config> { let mut home = match env::home_dir() { Some(home) => home, None => { return Ok(Default::default()) }, }; home.push(".sentry-symbolserver.yml"); Ok(if let Ok(_) = fs::metadata(&home) { Config::load_file(&home)? } else { Default::default() }) } /// Return the AWS access key pub fn get_aws_access_key<'a>(&'a self) -> Option<&str> { self.aws.access_key.as_ref().map(|x| &**x) } /// Return the AWS secret key pub fn get_aws_secret_key<'a>(&'a self) -> Option<&str> { self.aws.secret_key.as_ref().map(|x| &**x) } /// Return the AWS S3 bucket URL pub fn get_aws_bucket_url<'a>(&'a self) -> Result<Url> { let url = if let Some(ref value) = self.aws.bucket_url { Url::parse(value)? } else if let Ok(value) = env::var("SYMBOLSERVER_BUCKET_URL") { Url::parse(&value)? } else { return Err(ErrorKind::MissingConfigKey( "aws.bucket_url").into()); }; if url.scheme() != "s3" { return Err(ErrorKind::BadConfigKey( "aws.bucket_url", "The scheme for the bucket URL needs to be s3").into()); } else if url.host_str().is_none() { return Err(ErrorKind::BadConfigKey( "aws.bucket_url", "The bucket URL is missing a name").into()); } Ok(url) } /// Overrides the AWS bucket URL. pub fn set_aws_bucket_url(&mut self, value: &str) { self.aws.bucket_url = Some(value.to_string()); } /// Return the AWS region pub fn get_aws_region(&self) -> Result<Region> { let region_opt = self.aws.region .as_ref() .map(|x| x.to_string()) .or_else(|| env::var("AWS_DEFAULT_REGION").ok()); if let Some(region) = region_opt { if let Ok(rv) = region.parse() { Ok(rv) } else { Err(ErrorKind::BadConfigKey(
} } else { Ok(Region::UsEast1) } } /// Overrides the AWS region pub fn set_aws_region(&mut self, value: Region) { self.aws.region = Some(value.to_string()); } /// Return the path where symbols are stored. pub fn get_symbol_dir<'a>(&'a self) -> Result<Cow<'a, Path>> { if let Some(ref path) = self.symbol_dir { Ok(Cow::Borrowed(path.as_path())) } else if let Ok(dir) = env::var("SYMBOLSERVER_SYMBOL_DIR") { Ok(Cow::Owned(PathBuf::from(dir))) } else { Err(ErrorKind::MissingConfigKey("symbol_dir").into()) } } /// Override the symbol dir. pub fn set_symbol_dir<P: AsRef<Path>>(&mut self, value: P) { self.symbol_dir = Some(value.as_ref().to_path_buf()); } fn get_server_host(&self) -> Result<String> { if let Some(ref host) = self.server.host { Ok(host.clone()) } else if let Ok(var) = env::var("IP") { Ok(var) } else if is_docker() { Ok("0.0.0.0".into()) } else { Ok("127.0.0.1".into()) } } fn get_server_port(&self) -> Result<u16> { if let Some(port) = self.server.port { Ok(port) } else if let Ok(portstr) = env::var("PORT") { Ok(portstr.parse().chain_err(|| "Invalid value for port")?) } else { Ok(3000) } } /// Return the bind target for the http server pub fn get_server_socket_addr(&self) -> Result<(String, u16)> { Ok((self.get_server_host()?, self.get_server_port()?)) } /// Return the server healthcheck interval pub fn get_server_healthcheck_interval(&self) -> Result<Duration> { let ttl = if let Some(ttl) = self.server.healthcheck_interval { ttl } else if let Ok(ttlstr) = env::var("SYMBOLSERVER_HEALTHCHECK_INTERVAL") { ttlstr.parse().chain_err(|| "Invalid value for healthcheck interval")? } else { return Ok(Duration::seconds(30)); }; if ttl < 0 { return Err(ErrorKind::BadConfigKey( "server.healthcheck_interval", "Healthcheck interval has to be positive").into()); } Ok(Duration::seconds(ttl)) } /// Return the server sync interval pub fn get_server_sync_interval(&self) -> Result<Duration> { let interval = if let Some(interval) = self.sync.interval { interval } else if let Ok(intervalstr) = env::var("SYMBOLSERVER_SYNC_INTERVAL") { intervalstr.parse().chain_err(|| "Invalid value for sync interval")? } else { return Ok(Duration::minutes(1)); }; if interval < 0 { return Err(ErrorKind::BadConfigKey( "sync.interval", "Sync interval has to be positive").into()); } Ok(Duration::seconds(interval)) } /// Return the number of threads to listen on pub fn get_server_threads(&self) -> Result<usize> { if let Some(threads) = self.server.threads { Ok(threads) } else if let Ok(threadstr) = env::var("SYMBOLSERVER_THREADS") { Ok(threadstr.parse().chain_err(|| "Invalid value for thread count")?) 
} else { Ok(num_cpus::get() * 5 / 4) } } /// Return the log level filter pub fn get_log_level_filter(&self) -> Result<LogLevelFilter> { let level_opt = self.log.level .as_ref() .map(|x| x.to_string()) .or_else(|| env::var("SYMBOLSERVER_LOG_LEVEL").ok()); if let Some(lvl) = level_opt { lvl.parse().map_err(|_| ErrorKind::BadConfigKey( "log.level", "unknown log level").into()) } else { Ok(LogLevelFilter::Info) } } /// Override the log level filter in the config pub fn set_log_level_filter(&mut self, value: LogLevelFilter) { self.log.level = Some(value.to_string()); } /// Return the log filename pub fn get_log_filename<'a>(&'a self) -> Result<Option<Cow<'a, Path>>> { if let Some(ref path) = self.log.file { Ok(Some(Cow::Borrowed(&*path))) } else if let Ok(path) = env::var("SYMBOLSERVER_LOG_FILE") { Ok(Some(Cow::Owned(PathBuf::from(path)))) } else { Ok(None) } } /// Return the sync ignore patterns pub fn get_ignore_patterns(&self) -> Result<&IgnorePatterns> { Ok(&self.sync.ignore) } }
"aws.region", "An unknown AWS region was provided").into())
IListeners.ts
/// <reference path="../IListeners.ts" /> module xlib.ui.element.elements.strong { import IEvent = elements.IEvent; export interface IListeners<T> extends elements.IListeners<T> { click?: (event?: IEvent<T>) => void; dblclick?: (event?: IEvent<T>) => void; mousedown?: (event?: IEvent<T>) => void; mouseup?: (event?: IEvent<T>) => void; mouseover?: (event?: IEvent<T>) => void; mousemove?: (event?: IEvent<T>) => void; mouseout?: (event?: IEvent<T>) => void;
} }
keypress?: (event?: IEvent<T>) => void; keydown?: (event?: IEvent<T>) => void; keyup?: (event?: IEvent<T>) => void;
InstanceTopologyEventsHTMLVisitor.py
# =============================================================================== # NAME: InstanceTopologyEventsHTMLVisitor.py # # DESCRIPTION: A visitor responsible for the generation of HTML tables # of event ID's, etc. # # AUTHOR: reder # EMAIL: [email protected] # DATE CREATED : Sep. 13, 2016 # # Copyright 2016, California Institute of Technology. # ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged. # =============================================================================== # # Python standard modules # import logging import os import sys from fprime_ac.generators import formatters # from fprime_ac.utils import DiffAndRename from fprime_ac.generators.visitors import AbstractVisitor from fprime_ac.models import ModelParser # # Python extension modules and custom interfaces # # from Cheetah import Template # from fprime_ac.utils import version from fprime_ac.utils import ConfigManager # # Import precompiled templates here # try: from fprime_ac.generators.templates.html import HtmlEventsTablePage except ImportError: print("ERROR: must generate python templates first.") sys.exit(-1) # # Universal globals used within module go here. # (DO NOT USE MANY!) # # Global logger init. below. PRINT = logging.getLogger("output") DEBUG = logging.getLogger("debug") # # Module class or classes go here. class InstanceTopologyEventsHTMLVisitor(AbstractVisitor.AbstractVisitor): """ A visitor class responsible for generation of HTML event tables for component instances. """ __instance = None __config = None
__model_parser = None def __init__(self): """ Constructor. """ super().__init__() # self.initBase(self, "HTMLCmdTable") self.__config = ConfigManager.ConfigManager.getInstance() self.__form = formatters.Formatters() self.__form_comment = formatters.CommentFormatters() self.__model_parser = ModelParser.ModelParser.getInstance() self.__cmd_dir = "events" DEBUG.info("InstanceTopologyHTMLVisitor: Instanced.") self.bodytext = "" self.prototypetext = "" self.__fp_dict = ( dict() ) # dictionary of instance name keyword to file handle pointer def _writeTmpl(self, instance, c, visit_str): """ Wrapper to write tmpl to file descriptors. """ DEBUG.debug("InstanceTopologyHTMLVisitor:%s" % visit_str) DEBUG.debug("===================================") DEBUG.debug(c) self.__fp_dict[instance].writelines(c.__str__()) DEBUG.debug("===================================") def initFilesVisit(self, obj): """ Defined to generate files for generated code products. @param obj: the instance of the model to visit. """ # Check for command dir here and if none create it but always switch into it if not os.path.exists(self.__cmd_dir): os.mkdir(self.__cmd_dir) os.chdir(self.__cmd_dir) # Iterate over types for k in list(obj.get_base_id_dict().keys()): tlist = obj.get_base_id_dict()[k] # print "Type: %s\n" % k, # Iterate over instances and get name # Open file if events exist if not do nothing for t in tlist: # print "\tInstance: %s, Base ID: %s\n" % (t[0],t[1]) name = t[0] events_list = t[3].get_comp_xml().get_events() if len(events_list) > 0: filename = "%s_events.html" % t[0] # Open file for writing here... DEBUG.info("Open file: %s" % filename) try: self.__fp_dict[name] = open(filename, "w") DEBUG.info("Completed") except OSError: PRINT.info("Could not open %s file." % filename) sys.exit(-1) DEBUG.info( "Generating HTML Event Table for %s:%s component instance..." % (t[0], k) ) os.chdir("..") def startSourceFilesVisit(self, obj): """ Defined to generate starting static code within files. """ def includes1Visit(self, obj): """ Defined to generate includes within a file. Usually used for the base classes but also for Port types @param args: the instance of the concrete element to operate on. """ def includes2Visit(self, obj): """ Defined to generate internal includes within a file. Usually used for data type includes and system includes. @param args: the instance of the concrete element to operate on. """ def namespaceVisit(self, obj): """ Defined to generate namespace code within a file. Also any pre-condition code is generated. @param args: the instance of the concrete element to operate on. """ def eventArgsStr(self): """ Make a list of event args into a string """ def f(args): def g(lst): name = lst[0] return name return self.argsString(list(map(g, args))) return f def publicVisit(self, obj): """ Defined to generate public stuff within a class. @param args: the instance of the concrete element to operate on. 
""" # os.chdir(self.__cmd_dir) c = HtmlEventsTablePage.HtmlEventsTablePage() for k in list(obj.get_base_id_dict().keys()): tlist = obj.get_base_id_dict()[k] # print "Type: %s\n" % k, for t in tlist: if t[0] in list(self.__fp_dict.keys()): # print "\tInstance: %s, Base ID: %s\n" % (t[0],t[1]) eobj = t[3].get_comp_xml() c.name = "{}:{}".format(t[0], k) c.base_id = t[1] c.has_events = len(eobj.get_events()) > 0 c.events = self.__model_parser.getEventsList(eobj) c.event_enums = self.__model_parser.getEventEnumList(eobj) c.event_args = self.__model_parser.getEventArgsDict(eobj) c.event_params = c.event_args c.event_args_str = self.eventArgsStr() c.event_param_strs = self.__model_parser.getEventArgsPrototypeStringDict( eobj ) self._writeTmpl(t[0], c, "InstanceTopologyEventsHTML_Visitor") def protectedVisit(self, obj): """ Defined to generate protected stuff within a class. @parms args: the instance of the concrete element to operation on. """ def privateVisit(self, obj): """ Defined to generate private stuff within a class. @parms args: the instance of the concrete element to operation on. """ def finishSourceFilesVisit(self, obj): """ Defined to generate ending static code within files. """ for fp in list(self.__fp_dict.keys()): self.__fp_dict[fp].close() PRINT.info("Completed generating HTML event tables...")
__fp_dict = None __form = None __form_comment = None
router.go
package server import ( "context" "encoding/json" "fmt" "net/http" "net/http/pprof" "time" serverTypes "github.com/alibaba/pouch/apis/server/types" "github.com/alibaba/pouch/apis/types" "github.com/alibaba/pouch/pkg/errtypes" "github.com/alibaba/pouch/pkg/httputils" "github.com/alibaba/pouch/pkg/utils" "github.com/gorilla/mux" "github.com/sirupsen/logrus" ) // versionMatcher defines to parse version url path. const versionMatcher = "/v{version:[0-9.]+}" func initRoute(s *Server) *mux.Router { r := mux.NewRouter() handlers := []*serverTypes.HandlerSpec{ // system {Method: http.MethodGet, Path: "/_ping", HandlerFunc: s.ping}, {Method: http.MethodGet, Path: "/info", HandlerFunc: s.info}, {Method: http.MethodGet, Path: "/version", HandlerFunc: s.version}, {Method: http.MethodPost, Path: "/auth", HandlerFunc: s.auth}, {Method: http.MethodGet, Path: "/events", HandlerFunc: withCancelHandler(s.events)}, // daemon, we still list this API into system manager. {Method: http.MethodPost, Path: "/daemon/update", HandlerFunc: s.updateDaemon}, // container {Method: http.MethodPost, Path: "/containers/{name:.*}/checkpoints", HandlerFunc: withCancelHandler(s.createContainerCheckpoint)}, {Method: http.MethodGet, Path: "/containers/{name:.*}/checkpoints", HandlerFunc: withCancelHandler(s.listContainerCheckpoint)}, {Method: http.MethodDelete, Path: "/containers/{name}/checkpoints/{id}", HandlerFunc: withCancelHandler(s.deleteContainerCheckpoint)}, {Method: http.MethodPost, Path: "/containers/create", HandlerFunc: s.createContainer}, {Method: http.MethodPost, Path: "/containers/{name:.*}/start", HandlerFunc: s.startContainer}, {Method: http.MethodPost, Path: "/containers/{name:.*}/stop", HandlerFunc: s.stopContainer}, {Method: http.MethodPost, Path: "/containers/{name:.*}/attach", HandlerFunc: s.attachContainer}, {Method: http.MethodGet, Path: "/containers/json", HandlerFunc: s.getContainers}, {Method: http.MethodGet, Path: "/containers/{name:.*}/json", HandlerFunc: s.getContainer}, {Method: http.MethodDelete, Path: "/containers/{name:.*}", HandlerFunc: s.removeContainers}, {Method: http.MethodPost, Path: "/containers/{name:.*}/exec", HandlerFunc: s.createContainerExec}, {Method: http.MethodGet, Path: "/exec/{name:.*}/json", HandlerFunc: s.getExecInfo}, {Method: http.MethodPost, Path: "/exec/{name:.*}/start", HandlerFunc: s.startContainerExec}, {Method: http.MethodPost, Path: "/exec/{name:.*}/resize", HandlerFunc: s.resizeExec}, {Method: http.MethodPost, Path: "/containers/{name:.*}/rename", HandlerFunc: s.renameContainer}, {Method: http.MethodPost, Path: "/containers/{name:.*}/restart", HandlerFunc: s.restartContainer}, {Method: http.MethodPost, Path: "/containers/{name:.*}/pause", HandlerFunc: s.pauseContainer}, {Method: http.MethodPost, Path: "/containers/{name:.*}/unpause", HandlerFunc: s.unpauseContainer}, {Method: http.MethodPost, Path: "/containers/{name:.*}/update", HandlerFunc: s.updateContainer}, {Method: http.MethodPost, Path: "/containers/{name:.*}/upgrade", HandlerFunc: s.upgradeContainer}, {Method: http.MethodGet, Path: "/containers/{name:.*}/top", HandlerFunc: s.topContainer}, {Method: http.MethodGet, Path: "/containers/{name:.*}/logs", HandlerFunc: withCancelHandler(s.logsContainer)}, {Method: http.MethodGet, Path: "/containers/{name:.*}/stats", HandlerFunc: withCancelHandler(s.statsContainer)}, {Method: http.MethodPost, Path: "/containers/{name:.*}/resize", HandlerFunc: s.resizeContainer}, {Method: http.MethodPost, Path: "/containers/{name:.*}/restart", HandlerFunc: s.restartContainer}, 
{Method: http.MethodPost, Path: "/containers/{name:.*}/wait", HandlerFunc: withCancelHandler(s.waitContainer)}, {Method: http.MethodPost, Path: "/commit", HandlerFunc: withCancelHandler(s.commitContainer)}, // image {Method: http.MethodPost, Path: "/images/create", HandlerFunc: s.pullImage}, {Method: http.MethodPost, Path: "/images/search", HandlerFunc: s.searchImages}, {Method: http.MethodGet, Path: "/images/json", HandlerFunc: s.listImages}, {Method: http.MethodDelete, Path: "/images/{name:.*}", HandlerFunc: s.removeImage}, {Method: http.MethodGet, Path: "/images/{name:.*}/json", HandlerFunc: s.getImage}, {Method: http.MethodPost, Path: "/images/{name:.*}/tag", HandlerFunc: s.postImageTag}, {Method: http.MethodPost, Path: "/images/load", HandlerFunc: withCancelHandler(s.loadImage)}, {Method: http.MethodGet, Path: "/images/save", HandlerFunc: withCancelHandler(s.saveImage)}, {Method: http.MethodGet, Path: "/images/{name:.*}/history", HandlerFunc: s.getImageHistory}, {Method: http.MethodPost, Path: "/images/{name:.*}/push", HandlerFunc: s.pushImage}, // volume {Method: http.MethodGet, Path: "/volumes", HandlerFunc: s.listVolume}, {Method: http.MethodPost, Path: "/volumes/create", HandlerFunc: s.createVolume}, {Method: http.MethodGet, Path: "/volumes/{name:.*}", HandlerFunc: s.getVolume}, {Method: http.MethodDelete, Path: "/volumes/{name:.*}", HandlerFunc: s.removeVolume}, // network {Method: http.MethodGet, Path: "/networks", HandlerFunc: s.listNetwork}, {Method: http.MethodPost, Path: "/networks/create", HandlerFunc: s.createNetwork}, {Method: http.MethodGet, Path: "/networks/{id:.*}", HandlerFunc: s.getNetwork}, {Method: http.MethodDelete, Path: "/networks/{id:.*}", HandlerFunc: s.deleteNetwork}, {Method: http.MethodPost, Path: "/networks/{id:.*}/connect", HandlerFunc: s.connectToNetwork}, {Method: http.MethodPost, Path: "/networks/{id:.*}/disconnect", HandlerFunc: s.disconnectNetwork}, // metrics {Method: http.MethodGet, Path: "/metrics", HandlerFunc: s.metrics}, // cri stream {Method: http.MethodGet, Path: "/exec/{token}", HandlerFunc: s.criExec}, {Method: http.MethodPost, Path: "/exec/{token}", HandlerFunc: s.criExec}, {Method: http.MethodGet, Path: "/attach/{token}", HandlerFunc: s.criAttach}, {Method: http.MethodPost, Path: "/attach/{token}", HandlerFunc: s.criAttach}, {Method: http.MethodGet, Path: "/portforward/{token}", HandlerFunc: s.criPortForward}, {Method: http.MethodPost, Path: "/portforward/{token}", HandlerFunc: s.criPortForward}, } if s.APIPlugin != nil { handlers = s.APIPlugin.UpdateHandler(handlers) } // register API for _, h := range handlers { if h != nil { r.Path(versionMatcher + h.Path).Methods(h.Method).Handler(filter(h.HandlerFunc, s)) r.Path(h.Path).Methods(h.Method).Handler(filter(h.HandlerFunc, s)) } } if s.Config.Debug || s.Config.EnableProfiler { profilerSetup(r) } return r } func profilerSetup(mainRouter *mux.Router) { var r = mainRouter.PathPrefix("/debug/").Subrouter() r.HandleFunc("/pprof/", pprof.Index) r.HandleFunc("/pprof/cmdline", pprof.Cmdline) r.HandleFunc("/pprof/profile", pprof.Profile) r.HandleFunc("/pprof/symbol", pprof.Symbol) r.HandleFunc("/pprof/trace", pprof.Trace) r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) } // withCancelHandler will use context to cancel the handler. 
Otherwise, if the // connection has been cut by the client or firewall, the server handler // will hang and cause a goroutine leak. func withCancelHandler(h serverTypes.Handler) serverTypes.Handler { return func(ctx context.Context, rw http.ResponseWriter, req *http.Request) error { notifier, ok := rw.(http.CloseNotifier) if !ok { return h(ctx, rw, req) } var cancel context.CancelFunc ctx, cancel = context.WithCancel(ctx) waitCh := make(chan struct{}) defer close(waitCh) // NOTE: in order to avoid the race, we should get the // channel before select. // // Related issue: https://github.com/grpc-ecosystem/grpc-gateway/pull/120. closeNotify := notifier.CloseNotify() go func() { select { case <-closeNotify: cancel() case <-waitCh: } }() return h(ctx, rw, req) } } func filter(handler serverTypes.Handler, s *Server) http.HandlerFunc
// EncodeResponse encodes response in json. func EncodeResponse(rw http.ResponseWriter, statusCode int, data interface{}) error { rw.Header().Set("Content-Type", "application/json") rw.WriteHeader(statusCode) return json.NewEncoder(rw).Encode(data) } // HandleErrorResponse handles err from daemon side and constructs response for client side. func HandleErrorResponse(w http.ResponseWriter, err error) { var ( code int errMsg string ) // By default, daemon side returns code 500 if error happens. code = http.StatusInternalServerError errMsg = err.Error() httpErr, ok := err.(httputils.HTTPError) if ok { code = httpErr.Code() } else if errtypes.IsNotfound(err) { code = http.StatusNotFound } else if errtypes.IsInvalidParam(err) { code = http.StatusBadRequest } else if errtypes.IsAlreadyExisted(err) { code = http.StatusConflict } else if errtypes.IsNotModified(err) { code = http.StatusNotModified } w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) enc := json.NewEncoder(w) enc.SetEscapeHTML(false) resp := types.Error{ Message: errMsg, } enc.Encode(resp) }
{ pctx := context.Background() return func(w http.ResponseWriter, req *http.Request) { ctx, cancel := context.WithCancel(pctx) defer cancel() s.lock.RLock() if len(s.ManagerWhiteList) > 0 && req.TLS != nil && len(req.TLS.PeerCertificates) > 0 { if _, isManager := s.ManagerWhiteList[req.TLS.PeerCertificates[0].Subject.CommonName]; !isManager { s.lock.RUnlock() w.WriteHeader(http.StatusForbidden) w.Write([]byte("tls verified error.")) return } } s.lock.RUnlock() t := time.Now() clientInfo := req.RemoteAddr defer func() { d := time.Since(t) / (time.Millisecond) // If elapse time of handler >= 500ms, log request. if d >= 500 { logrus.Infof("End of Calling %s %s, costs %d ms. client %s", req.Method, req.URL.Path, d, clientInfo) } }() if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 { issuer := req.TLS.PeerCertificates[0].Issuer.CommonName clientName := req.TLS.PeerCertificates[0].Subject.CommonName ctx = utils.SetTLSIssuer(ctx, issuer) ctx = utils.SetTLSCommonName(ctx, clientName) clientInfo = fmt.Sprintf("%s %s %s", clientInfo, issuer, clientName) } if req.Method != http.MethodGet { logrus.Infof("Calling %s %s, client %s", req.Method, req.URL.RequestURI(), clientInfo) } else { logrus.Debugf("Calling %s %s, client %s", req.Method, req.URL.RequestURI(), clientInfo) } // Start to handle request. err := handler(ctx, w, req) if err == nil { return } // Handle error if request handling fails. logrus.Errorf("Handler for %s %s, client %s returns error: %s", req.Method, req.URL.RequestURI(), clientInfo, err) HandleErrorResponse(w, err) } }
IBCarbonscan.py
# IBCarbonscan.py import sys from bgenlocations import TOOLBOXDIR, BGENDIR sys.path.append(BGENDIR) from scantools import Scanner_OSX def main(): print "---Scanning IBCarbonRuntime.h---" input = ["IBCarbonRuntime.h"] output = "IBCarbongen.py" defsoutput = TOOLBOXDIR + "IBCarbonRuntime.py" scanner = IBCarbon_Scanner(input, output, defsoutput) scanner.scan() scanner.close() print "=== Testing definitions output code ===" execfile(defsoutput, {}, {}) print "--done scanning, importing--" import IBCarbonsupport print "done" class IBCarbon_Scanner(Scanner_OSX): def destination(self, type, name, arglist): classname = "IBCarbonFunction" listname = "functions" if arglist: t, n, m = arglist[0] if t == "IBNibRef" and m == "InMode": classname = "IBCarbonMethod" listname = "methods" return classname, listname def makeblacklistnames(self): return [ "DisposeNibReference", # taken care of by destructor "CreateNibReferenceWithCFBundle", ## need to wrap CFBundle.h properly first ] def
(self): return [] if __name__ == "__main__": main()
makerepairinstructions
identifier.rs
use crate::{ErrorKind, Result, ResultExt}; use heck::{ToPascalCase, ToSnakeCase}; use proc_macro2::Ident; pub trait ToIdent: ToOwned { fn to_ident(&self) -> Result<Ident>; } impl ToIdent for str { fn to_ident(&self) -> Result<Ident> { parse_ident(self) } } pub trait CamelCaseIdent: ToOwned { fn to_camel_case_id(&self) -> String; fn to_camel_case_ident(&self) -> Result<Ident>; } impl CamelCaseIdent for str { fn to_camel_case_id(&self) -> String { let is_number = starts_with_number(self); let mut txt = replace_first(self, true, true); txt = replace_first(&txt, true, false); txt = replace_special_chars(&txt); if !is_number { // will remove underscores txt = txt.to_pascal_case(); } txt } fn to_camel_case_ident(&self) -> Result<Ident> { self.to_camel_case_id().to_ident() } } pub trait SnakeCaseIdent: ToOwned { fn to_snake_case_id(&self) -> String; fn to_snake_case_ident(&self) -> Result<Ident>; } impl SnakeCaseIdent for str { fn to_snake_case_id(&self) -> String { let mut txt = replace_first(self, false, true); txt = replace_special_chars(&txt); txt = txt.to_snake_case(); suffix_keyword(&txt) } fn to_snake_case_ident(&self) -> Result<Ident> { self.to_snake_case_id().to_ident() } } pub fn id(text: &str) -> String { let mut txt = replace_first(text, false, false); txt = replace_special_chars(&txt); txt = remove_spaces(&txt); txt = suffix_keyword(&txt); txt } pub fn parse_ident(text: &str) -> Result<Ident> { syn::parse_str::<Ident>(&id(text)).with_context(ErrorKind::Parse, || format!("parse ident {text}")) } fn remove_spaces(text: &str) -> String { text.replace(' ', "") } /// replace special characters with underscores fn replace_special_chars(text: &str) -> String { let mut txt = text.replace('.', "_"); txt = txt.replace(',', "_"); txt = txt.replace('-', "_"); txt = txt.replace('/', "_"); txt = txt.replace('*', "_"); txt = txt.replace(':', "_"); txt } fn starts_with_number(text: &str) -> bool { match text.chars().next() { Some(ch) => ch.is_numeric(), None => false, } } fn unicode(c: char, uppercase: bool) -> String { let s = c.escape_unicode().to_string(); let u = if uppercase { 'U' } else { 'u' }; format!("{}{}", u, &s[3..s.len() - 1]) } fn replace_first(text: &str, uppercase: bool, remove: bool) -> String { let first = text.chars().next().unwrap_or_default(); if first.is_numeric() { let n = if uppercase { 'N' } else { 'n' }; format!("{}{}", n, text) } else if !first.is_ascii_alphanumeric() { if text.len() > 1 { if remove { text[1..].to_owned() } else { format!("{}{}", unicode(first, uppercase), &text[1..]) } } else { unicode(first, uppercase) } } else { text.to_owned() } } /// add an underscore suffix it is a keyword fn suffix_keyword(text: &str) -> String { if is_keyword(text) { format!("{}_", text) } else { text.to_owned() } } fn is_keyword(word: &str) -> bool { matches!( word, // https://doc.rust-lang.org/grammar.html#keywords "abstract" | "alignof" | "as" | "async" | "become" | "box" | "break" | "const" | "continue" | "crate" | "do" | "else" | "enum" | "extern" | "false" | "final" | "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" | "macro" | "match" | "mod" | "move" | "mut" | "offsetof" | "override" | "priv" | "proc" | "pub" | "pure" | "ref" | "return" | "Self" | "self" | "sizeof" | "static" | "struct" | "super" | "trait" | "true" | "type" | "typeof" | "unsafe" | "unsized" | "use" | "virtual" | "where" | "while" | "yield" ) } #[cfg(test)] mod tests { use super::*; use heck::ToSnakeCase; #[test] fn test_unicode() -> Result<()> { assert_eq!(unicode(',', false), "u2c"); Ok(()) } #[test] fn
() -> Result<()> { assert_eq!(replace_first(".", false, false), "u2e"); assert_eq!(replace_first("/", false, false), "u2f"); assert_eq!(replace_first("", false, false), "u0"); Ok(()) } #[test] fn test_replace_special_chars() -> Result<()> { assert_eq!(replace_special_chars("."), "_"); assert_eq!(replace_special_chars(","), "_"); assert_eq!(replace_special_chars("-"), "_"); assert_eq!(replace_special_chars("/"), "_"); assert_eq!(replace_special_chars("*"), "_"); Ok(()) } #[test] fn test_odata_next_link() -> Result<()> { let idt = "odata.nextLink".to_snake_case(); let idt = id(&idt); assert_eq!(idt.to_string(), "odata_next_link"); Ok(()) } #[test] fn test_three_dot_two() -> Result<()> { let idt = id("3.2"); assert_eq!(idt.to_string(), "n3_2"); Ok(()) } #[test] fn test_system_assigned_user_assigned() -> Result<()> { assert_eq!("SystemAssigned, UserAssigned".to_camel_case_id(), "SystemAssignedUserAssigned"); Ok(()) } #[test] fn test_gcm_aes_128() -> Result<()> { assert_eq!("gcm-aes-128".to_camel_case_id(), "GcmAes128"); Ok(()) } #[test] fn test_5() -> Result<()> { assert_eq!("5".to_camel_case_id(), "N5"); Ok(()) } #[test] fn test_app_configuration() -> Result<()> { assert_eq!( "Microsoft.AppConfiguration/configurationStores".to_camel_case_id(), "MicrosoftAppConfigurationConfigurationStores" ); Ok(()) } #[test] fn test_microsoft_key_vault_vaults() -> Result<()> { assert_eq!("Microsoft.KeyVault/vaults".to_camel_case_id(), "MicrosoftKeyVaultVaults"); Ok(()) } #[test] fn test_azure_virtual_machine_best_practices() -> Result<()> { assert_eq!( "Azure virtual machine best practices - Dev/Test".to_camel_case_id(), "AzureVirtualMachineBestPracticesDevTest" ); Ok(()) } #[test] fn test_1_0() -> Result<()> { assert_eq!("1.0".to_camel_case_id(), "N1_0"); Ok(()) } #[test] fn test_async() -> Result<()> { assert_eq!("Async".to_snake_case_id(), "async_"); Ok(()) } #[test] fn test_attr_qualified_name() -> Result<()> { assert_eq!("attr:qualifiedName".to_snake_case_id(), "attr_qualified_name"); Ok(()) } #[test] fn test_filter() -> Result<()> { assert_eq!("$filter".to_snake_case_id(), "filter"); Ok(()) } #[test] fn test_odata_type() -> Result<()> { assert_eq!("@odata.type".to_camel_case_id(), "OdataType"); Ok(()) } #[test] fn test_10minutely() -> Result<()> { assert_eq!("_10minutely".to_camel_case_id(), "N10minutely"); Ok(()) } }
test_replace_first
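The tests above double as a specification for the sanitization rules. For a quick cross-check, here is a rough Python approximation of the simple camel-case cases (my own sketch, not the crate's algorithm; it skips the keyword, unicode, and snake-case handling):

import re

def to_camel_case_id(text: str) -> str:
    # Rough mirror of the cases exercised by the tests above (assumption-laden sketch).
    if text[:1].isdigit():
        # Leading digits get an 'N' prefix and special chars become underscores.
        return "N" + re.sub(r"[.,\-/*:]", "_", text)
    text = re.sub(r"[.,\-/*:]", "_", text)
    # Pascal-case: split on underscores/spaces and capitalize each piece.
    return "".join(w[:1].upper() + w[1:] for w in re.split(r"[_ ]+", text) if w)

assert to_camel_case_id("gcm-aes-128") == "GcmAes128"
assert to_camel_case_id("1.0") == "N1_0"
assert to_camel_case_id("5") == "N5"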
_converters.py
# -*- coding: utf-8 -*-
# This file was generated
import nidcpower._visatype as _visatype
import nidcpower.errors as errors

import array
import datetime
import numbers

from functools import singledispatch


@singledispatch
def _convert_repeated_capabilities(arg, prefix):  # noqa: F811
    '''Base version that should not be called

    Overall purpose is to convert the repeated capabilities to a list of strings with prefix from whatever form

    Supported types:
    - str - List (comma delimited)
    - str - Range (using '-' or ':')
    - str - single item
    - int
    - tuple
    - range
    - slice

    Each instance should return a list of strings, without prefix

    - '0' --> ['0']
    - 0 --> ['0']
    - '0, 1' --> ['0', '1']
    - 'ScriptTrigger0, ScriptTrigger1' --> ['0', '1']
    - '0-1' --> ['0', '1']
    - '0:1' --> ['0', '1']
    - '0-1,4' --> ['0', '1', '4']
    - range(0, 2) --> ['0', '1']
    - slice(0, 2) --> ['0', '1']
    - (0, 1, 4) --> ['0', '1', '4']
    - ('0-1', 4) --> ['0', '1', '4']
    - (slice(0, 1), '2', [4, '5-6'], '7-9', '11:14', '16, 17') --> ['0', '2', '4', '5', '6', '7', '8', '9', '11', '12', '13', '14', '16', '17']
    '''
    raise errors.InvalidRepeatedCapabilityError('Invalid type', type(arg))


@_convert_repeated_capabilities.register(numbers.Integral)  # noqa: F811
def _(repeated_capability, prefix):
    '''Integer version'''
    return [str(repeated_capability)]


# This parsing function duplicates the parsing in the driver, so if changes to the allowed format are made there, they will need to be replicated here.
@_convert_repeated_capabilities.register(str)  # noqa: F811
def _(repeated_capability, prefix):
    '''String version (this is the most complex)

    We need to deal with a range ('0-3' or '0:3'), a list ('0,1,2,3') and a single item
    '''
    # First we deal with a list
    rep_cap_list = repeated_capability.split(',')
    if len(rep_cap_list) > 1:
        # We have a list so call ourselves again to let the iterable instance handle it
        return _convert_repeated_capabilities(rep_cap_list, prefix)

    # Now we deal with ranges
    # We remove any prefix and change ':' to '-'
    r = repeated_capability.strip().replace(prefix, '').replace(':', '-')
    rc = r.split('-')
    if len(rc) > 1:
        if len(rc) > 2:
            raise errors.InvalidRepeatedCapabilityError("Multiple '-' or ':'", repeated_capability)
        start = int(rc[0])
        end = int(rc[1])
        if end < start:
            rng = range(start, end - 1, -1)
        else:
            rng = range(start, end + 1)
        return _convert_repeated_capabilities(rng, prefix)

    # If we made it here, it must be a simple item so we remove any prefix and return
    return [repeated_capability.replace(prefix, '').strip()]


# We cannot use collections.abc.Iterable here because strings are also iterable and then this
# instance is what gets called instead of the string one.
@_convert_repeated_capabilities.register(list) # noqa: F811 @_convert_repeated_capabilities.register(range) # noqa: F811 @_convert_repeated_capabilities.register(tuple) # noqa: F811 def _(repeated_capability, prefix): '''Iterable version - can handle lists, ranges, and tuples''' rep_cap_list = [] for r in repeated_capability: rep_cap_list += _convert_repeated_capabilities(r, prefix) return rep_cap_list @_convert_repeated_capabilities.register(slice) # noqa: F811 def _(repeated_capability, prefix): '''slice version''' def ifnone(a, b): return b if a is None else a # Turn the slice into a list and call ourselves again to let the iterable instance handle it rng = range(ifnone(repeated_capability.start, 0), repeated_capability.stop, ifnone(repeated_capability.step, 1)) return _convert_repeated_capabilities(rng, prefix) def convert_repeated_capabilities(repeated_capability, prefix=''): '''Convert a repeated capabilities object to a comma delimited list Args: repeated_capability (str, list, tuple, slice, None) - prefix (str) - common prefix for all strings Returns: rep_cal_list (list of str) - list of each repeated capability item with ranges expanded and prefix added ''' # We need to explicitly handle None here. Everything else we can pass on to the singledispatch functions if repeated_capability is None: return []
def convert_repeated_capabilities_from_init(repeated_capability):
    '''Convert a repeated capabilities object to a comma delimited list

    Parameter list is so it can be called from the code generated __init__(). We know it is for channels when
    called this way so we use a prefix of ''

    Args:
        repeated_capability (str, list, tuple, slice, None) -

    Returns:
        rep_cap (str) - comma delimited string of each repeated capability item with ranges expanded
    '''
    return ','.join(convert_repeated_capabilities(repeated_capability, ''))


def _convert_timedelta(value, library_type, scaling):
    try:
        # We first assume it is a datetime.timedelta object
        scaled_value = value.total_seconds() * scaling
    except AttributeError:
        # If that doesn't work, assume it is a value in seconds
        # cast to float so scaled_value is always a float. This allows `timeout=10` to work as expected
        scaled_value = float(value) * scaling

    # ctype integer types don't convert to int from float so we need to do it explicitly
    if library_type in [_visatype.ViInt64, _visatype.ViInt32, _visatype.ViUInt32, _visatype.ViInt16, _visatype.ViUInt16, _visatype.ViInt8]:
        scaled_value = int(scaled_value)

    return library_type(scaled_value)


def convert_timedelta_to_seconds_real64(value):
    return _convert_timedelta(value, _visatype.ViReal64, 1)


def convert_timedelta_to_milliseconds_int32(value):
    return _convert_timedelta(value, _visatype.ViInt32, 1000)


def convert_timedeltas_to_seconds_real64(values):
    return [convert_timedelta_to_seconds_real64(i) for i in values]


def convert_seconds_real64_to_timedeltas(seconds):
    return [datetime.timedelta(seconds=i) for i in seconds]


def convert_month_to_timedelta(months):
    return datetime.timedelta(days=(30.4167 * months))


# This converter is not called from the normal codegen path for functions. Instead it is
# called from init and is a special case.
def convert_init_with_options_dictionary(values): if type(values) is str: init_with_options_string = values else: good_keys = { 'rangecheck': 'RangeCheck', 'queryinstrstatus': 'QueryInstrStatus', 'cache': 'Cache', 'simulate': 'Simulate', 'recordcoercions': 'RecordCoercions', 'interchangecheck': 'InterchangeCheck', 'driversetup': 'DriverSetup', 'range_check': 'RangeCheck', 'query_instr_status': 'QueryInstrStatus', 'record_coercions': 'RecordCoercions', 'interchange_check': 'InterchangeCheck', 'driver_setup': 'DriverSetup', } init_with_options = [] for k in sorted(values.keys()): value = None if k.lower() in good_keys and not good_keys[k.lower()] == 'DriverSetup': value = good_keys[k.lower()] + ('=1' if values[k] is True else '=0') elif k.lower() in good_keys and good_keys[k.lower()] == 'DriverSetup': if not isinstance(values[k], dict): raise TypeError('DriverSetup must be a dictionary') value = 'DriverSetup=' + (';'.join([key + ':' + values[k][key] for key in sorted(values[k])])) else: value = k + ('=1' if values[k] is True else '=0') init_with_options.append(value) init_with_options_string = ','.join(init_with_options) return init_with_options_string # convert value to bytes @singledispatch def _convert_to_bytes(value): # noqa: F811 pass @_convert_to_bytes.register(list) # noqa: F811 @_convert_to_bytes.register(bytes) # noqa: F811 @_convert_to_bytes.register(bytearray) # noqa: F811 @_convert_to_bytes.register(array.array) # noqa: F811 def _(value): return value @_convert_to_bytes.register(str) # noqa: F811 def _(value): return value.encode() def convert_to_bytes(value): # noqa: F811 return bytes(_convert_to_bytes(value)) # Let's run some tests def test_convert_init_with_options_dictionary(): assert convert_init_with_options_dictionary('') == '' assert convert_init_with_options_dictionary('Simulate=1') == 'Simulate=1' assert convert_init_with_options_dictionary({'Simulate': True, }) == 'Simulate=1' assert convert_init_with_options_dictionary({'Simulate': False, }) == 'Simulate=0' assert convert_init_with_options_dictionary({'Simulate': True, 'Cache': False}) == 'Cache=0,Simulate=1' assert convert_init_with_options_dictionary({'DriverSetup': {'Model': '5162 (4CH)', 'Bitfile': 'CustomProcessing'}}) == 'DriverSetup=Bitfile:CustomProcessing;Model:5162 (4CH)' assert convert_init_with_options_dictionary({'Simulate': True, 'DriverSetup': {'Model': '5162 (4CH)', 'Bitfile': 'CustomProcessing'}}) == 'DriverSetup=Bitfile:CustomProcessing;Model:5162 (4CH),Simulate=1' assert convert_init_with_options_dictionary({'simulate': True, 'cache': False}) == 'Cache=0,Simulate=1' assert convert_init_with_options_dictionary({'driver_setup': {'Model': '5162 (4CH)', 'Bitfile': 'CustomProcessing'}}) == 'DriverSetup=Bitfile:CustomProcessing;Model:5162 (4CH)' assert convert_init_with_options_dictionary({'simulate': True, 'driver_setup': {'Model': '5162 (4CH)', 'Bitfile': 'CustomProcessing'}}) == 'DriverSetup=Bitfile:CustomProcessing;Model:5162 (4CH),Simulate=1' # Tests - time def test_convert_timedelta_to_seconds_double(): test_result = convert_timedelta_to_seconds_real64(datetime.timedelta(seconds=10)) assert test_result.value == 10.0 assert isinstance(test_result, _visatype.ViReal64) test_result = convert_timedelta_to_seconds_real64(datetime.timedelta(seconds=-1)) assert test_result.value == -1 assert isinstance(test_result, _visatype.ViReal64) test_result = convert_timedelta_to_seconds_real64(10.5) assert test_result.value == 10.5 assert isinstance(test_result, _visatype.ViReal64) test_result = 
convert_timedelta_to_seconds_real64(-1) assert test_result.value == -1 assert isinstance(test_result, _visatype.ViReal64) def test_convert_timedelta_to_milliseconds_int32(): test_result = convert_timedelta_to_milliseconds_int32(datetime.timedelta(seconds=10)) assert test_result.value == 10000 assert isinstance(test_result, _visatype.ViInt32) test_result = convert_timedelta_to_milliseconds_int32(datetime.timedelta(seconds=-1)) assert test_result.value == -1000 assert isinstance(test_result, _visatype.ViInt32) test_result = convert_timedelta_to_milliseconds_int32(10.5) assert test_result.value == 10500 assert isinstance(test_result, _visatype.ViInt32) test_result = convert_timedelta_to_milliseconds_int32(-1) assert test_result.value == -1000 assert isinstance(test_result, _visatype.ViInt32) def test_convert_timedeltas_to_seconds_real64(): time_values = [10.5, -1] test_result = convert_timedeltas_to_seconds_real64(time_values) assert all([actual.value == expected for actual, expected in zip(test_result, time_values)]) assert all([isinstance(i, _visatype.ViReal64) for i in test_result]) timedeltas = [datetime.timedelta(seconds=s, milliseconds=ms) for s, ms in zip([10, -1], [500, 0])] test_result = convert_timedeltas_to_seconds_real64(timedeltas) assert all([actual.value == expected for actual, expected in zip(test_result, time_values)]) assert all([isinstance(i, _visatype.ViReal64) for i in test_result]) def test_convert_seconds_real64_to_timedeltas(): time_values = [10.5, -1] timedeltas = convert_seconds_real64_to_timedeltas(time_values) assert all([actual.total_seconds() == expected for actual, expected in zip(timedeltas, time_values)]) # Tests - repeated capabilities def test_repeated_capabilies_string_channel(): test_result_list = convert_repeated_capabilities('0') assert test_result_list == ['0'] test_result_list = convert_repeated_capabilities('r0') assert test_result_list == ['r0'] test_result_list = convert_repeated_capabilities('0,1') assert test_result_list == ['0', '1'] def test_repeated_capabilies_string_prefix(): test_result_list = convert_repeated_capabilities('0', prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0'] def test_repeated_capabilies_list_channel(): test_result_list = convert_repeated_capabilities(['0']) assert test_result_list == ['0'] test_result_list = convert_repeated_capabilities(['r0']) assert test_result_list == ['r0'] test_result_list = convert_repeated_capabilities(['0', '1']) assert test_result_list == ['0', '1'] test_result_list = convert_repeated_capabilities([0, 1]) assert test_result_list == ['0', '1'] test_result_list = convert_repeated_capabilities([0, 1, '3']) assert test_result_list == ['0', '1', '3'] def test_repeated_capabilies_list_prefix(): test_result_list = convert_repeated_capabilities(['ScriptTrigger0', 'ScriptTrigger1'], prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities(['0'], prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0'] test_result_list = convert_repeated_capabilities(['0', '1'], prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities([0, 1], prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] def test_repeated_capabilies_tuple_channel(): test_result_list = convert_repeated_capabilities(('0')) assert test_result_list == ['0'] test_result_list = convert_repeated_capabilities(('0,1')) assert 
test_result_list == ['0', '1'] test_result_list = convert_repeated_capabilities(('0', '1')) assert test_result_list == ['0', '1'] test_result_list = convert_repeated_capabilities((0, 1)) assert test_result_list == ['0', '1'] test_result_list = convert_repeated_capabilities((0, 1, '3')) assert test_result_list == ['0', '1', '3'] def test_repeated_capabilies_tuple_prefix(): test_result_list = convert_repeated_capabilities(('ScriptTrigger0,ScriptTrigger1'), prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities(('0'), prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0'] test_result_list = convert_repeated_capabilities(('0', '1'), prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities((0, 1), prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] def test_repeated_capabilies_unicode(): test_result_list = convert_repeated_capabilities(u'ScriptTrigger0,ScriptTrigger1', prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities(u'ScriptTrigger0,ScriptTrigger1', prefix=u'ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities('ScriptTrigger0,ScriptTrigger1', prefix=u'ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] def test_repeated_capabilies_raw(): test_result_list = convert_repeated_capabilities(r'ScriptTrigger0,ScriptTrigger1', prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities(r'ScriptTrigger0,ScriptTrigger1', prefix=r'ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities('ScriptTrigger0,ScriptTrigger1', prefix=r'ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities(r'ScriptTrigger0,ScriptTrigger1', prefix=u'ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities(r'ScriptTrigger0,ScriptTrigger1', prefix=r'ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities(u'ScriptTrigger0,ScriptTrigger1', prefix=r'ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] def test_repeated_capabilies_slice_channel(): test_result_list = convert_repeated_capabilities(slice(0, 1)) assert test_result_list == ['0'] test_result_list = convert_repeated_capabilities(slice(0, 2)) assert test_result_list == ['0', '1'] test_result_list = convert_repeated_capabilities(slice(None, 2)) assert test_result_list == ['0', '1'] def test_repeated_capabilies_mixed_channel(): test_result_list = convert_repeated_capabilities((slice(0, 1), '2', [4, '5-6'], '7-9', '11:14', '16, 17')) assert test_result_list == ['0', '2', '4', '5', '6', '7', '8', '9', '11', '12', '13', '14', '16', '17'] test_result_list = convert_repeated_capabilities([slice(0, 1), '2', [4, '5-6'], '7-9', '11:14', '16, 17']) assert test_result_list == ['0', '2', '4', '5', '6', '7', '8', '9', '11', '12', '13', '14', '16', '17'] def test_repeated_capabilies_mixed_prefix(): test_result_list = convert_repeated_capabilities((slice(0, 1), '2', [4, '5-6'], '7-9', '11:14', '16, 17'), 
prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger2', 'ScriptTrigger4', 'ScriptTrigger5', 'ScriptTrigger6', 'ScriptTrigger7', 'ScriptTrigger8', 'ScriptTrigger9', 'ScriptTrigger11', 'ScriptTrigger12', 'ScriptTrigger13', 'ScriptTrigger14', 'ScriptTrigger16', 'ScriptTrigger17'] test_result_list = convert_repeated_capabilities([slice(0, 1), '2', [4, '5-6'], '7-9', '11:14', '16, 17'], prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger2', 'ScriptTrigger4', 'ScriptTrigger5', 'ScriptTrigger6', 'ScriptTrigger7', 'ScriptTrigger8', 'ScriptTrigger9', 'ScriptTrigger11', 'ScriptTrigger12', 'ScriptTrigger13', 'ScriptTrigger14', 'ScriptTrigger16', 'ScriptTrigger17'] def test_invalid_repeated_capabilies(): try: convert_repeated_capabilities('6-8-10') assert False except errors.InvalidRepeatedCapabilityError: pass try: convert_repeated_capabilities(['5', '6-8-10']) assert False except errors.InvalidRepeatedCapabilityError: pass try: convert_repeated_capabilities(('5', '6-8-10')) assert False except errors.InvalidRepeatedCapabilityError: pass try: convert_repeated_capabilities('5,6-8-10') assert False except errors.InvalidRepeatedCapabilityError: pass try: convert_repeated_capabilities(5.0) assert False except errors.InvalidRepeatedCapabilityError: pass try: convert_repeated_capabilities([5.0, '0']) assert False except errors.InvalidRepeatedCapabilityError: pass try: convert_repeated_capabilities((5.0, '0')) assert False except errors.InvalidRepeatedCapabilityError: pass def test_repeated_capabilies_slice_prefix(): test_result_list = convert_repeated_capabilities(slice(0, 1), prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0'] test_result_list = convert_repeated_capabilities(slice(0, 2), prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] test_result_list = convert_repeated_capabilities(slice(None, 2), prefix='ScriptTrigger') assert test_result_list == ['ScriptTrigger0', 'ScriptTrigger1'] def test_repeated_capabilies_from_init(): test_result = convert_repeated_capabilities_from_init((slice(0, 1), '2', [4, '5-6'], '7-9', '11:14', '16, 17')) assert test_result == '0,2,4,5,6,7,8,9,11,12,13,14,16,17' def test_string_to_list_channel(): test_result = _convert_repeated_capabilities('r0', '') assert test_result == ['r0'] test_result = _convert_repeated_capabilities(['0-2'], '') assert test_result == ['0', '1', '2'] test_result = _convert_repeated_capabilities(['3:7'], '') assert test_result == ['3', '4', '5', '6', '7'] test_result = _convert_repeated_capabilities(['2-0'], '') assert test_result == ['2', '1', '0'] test_result = _convert_repeated_capabilities(['2:0'], '') assert test_result == ['2', '1', '0'] def test_string_to_list_prefix(): test_result = _convert_repeated_capabilities(['ScriptTrigger3-ScriptTrigger7'], 'ScriptTrigger') assert test_result == ['3', '4', '5', '6', '7'] test_result = _convert_repeated_capabilities(['ScriptTrigger3:ScriptTrigger7'], 'ScriptTrigger') assert test_result == ['3', '4', '5', '6', '7'] test_result = _convert_repeated_capabilities(['ScriptTrigger2-ScriptTrigger0'], 'ScriptTrigger') assert test_result == ['2', '1', '0'] test_result = _convert_repeated_capabilities(['ScriptTrigger2:ScriptTrigger0'], 'ScriptTrigger') assert test_result == ['2', '1', '0']
return [prefix + r for r in _convert_repeated_capabilities(repeated_capability, prefix)]
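The docstrings above spell out the expansion rules by example. For readers who want the core string case in isolation, here is a minimal standalone sketch (the name expand and the simplifications are mine; it assumes well-formed input and drops the singledispatch machinery and error handling of the real module):

def expand(rep_cap, prefix=''):
    """Expand '0-2,5'-style repeated capabilities into a prefixed string list (sketch)."""
    if isinstance(rep_cap, int):
        return [prefix + str(rep_cap)]
    items = []
    for part in str(rep_cap).split(','):
        # Strip the prefix and normalize ':' ranges to '-' ranges.
        r = part.strip().replace(prefix, '').replace(':', '-')
        bounds = r.split('-')
        if len(bounds) == 2:
            start, end = int(bounds[0]), int(bounds[1])
            step = 1 if end >= start else -1  # ranges may run backwards, e.g. '2:0'
            items += [str(i) for i in range(start, end + step, step)]
        else:
            items.append(r)
    return [prefix + i for i in items]

assert expand('0-1,4') == ['0', '1', '4']
assert expand('2:0') == ['2', '1', '0']
assert expand('ScriptTrigger0, ScriptTrigger1', prefix='ScriptTrigger') == ['ScriptTrigger0', 'ScriptTrigger1']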
setup_unix.py
"""Install Pycommit""" import sys from subprocess import call if __name__ == "__main__": call(["git", "pull"]) print("Installing PyCommit.") with open("pycommit.py") as f: d = f.readlines() d = list(map(lambda s: s.replace("<python3_path>", sys.executable), d)) output = "/usr/local/bin/pycommit" with open(output, "w") as f: f.writelines(d) call(["chmod", "+x", output])
print("Installed PyCommit.")
    if len(sys.argv) > 1:
        for repo in sys.argv[1:]:
            call(["cp", ".pycommit.json", repo])
hui-sensor-card.ts
import { HassEntity } from "home-assistant-js-websocket/dist/types"; import { customElement } from "lit-element"; import { HomeAssistant } from "../../../types"; import { findEntities } from "../common/find-entites"; import { GraphHeaderFooterConfig } from "../header-footer/types"; import { LovelaceCardEditor } from "../types"; import { HuiEntityCard } from "./hui-entity-card"; import { EntityCardConfig, SensorCardConfig } from "./types"; @customElement("hui-sensor-card") class
extends HuiEntityCard { public static async getConfigElement(): Promise<LovelaceCardEditor> { await import( /* webpackChunkName: "hui-sensor-card-editor" */ "../editor/config-elements/hui-sensor-card-editor" ); return document.createElement("hui-sensor-card-editor"); } public static getStubConfig( hass: HomeAssistant, entities: string[], entitiesFallback: string[] ): SensorCardConfig { const includeDomains = ["sensor"]; const maxEntities = 1; const entityFilter = (stateObj: HassEntity): boolean => { return ( !isNaN(Number(stateObj.state)) && !!stateObj.attributes.unit_of_measurement ); }; const foundEntities = findEntities( hass, maxEntities, entities, entitiesFallback, includeDomains, entityFilter ); return { type: "sensor", entity: foundEntities[0] || "", graph: "line" }; } public setConfig(config: SensorCardConfig): void { if (!config.entity || config.entity.split(".")[0] !== "sensor") { throw new Error("Specify an entity from within the sensor domain."); } const { graph, detail, hours_to_show, ...cardConfig } = config; const entityCardConfig: EntityCardConfig = { ...cardConfig, type: "entity", }; if (graph === "line") { const footerConfig: GraphHeaderFooterConfig = { type: "graph", entity: config.entity, detail: detail || 1, hours_to_show: hours_to_show || 24, }; entityCardConfig.footer = footerConfig; } super.setConfig(entityCardConfig); } } declare global { interface HTMLElementTagNameMap { "hui-sensor-card": HuiSensorCard; } }
HuiSensorCard
test_irl.py
import pytest import irl def test_equality_on_normalize(): url1 = irl.URL.parse("http://ヒ.example.com/abc%af?ヒq%CC#%dE") url2 = irl.URL.parse("HTTP://xn--pdk.eXaMpLe.CoM/abc%AF?%E3%83%92q%cc#%De") assert url1 == url2 @pytest.mark.parametrize( ["url", "addr"], [ ("http://example.com", ("example.com", 80)), ("https://example.com", ("example.com", 443)), ("https://example.com:1337", ("example.com", 1337)), ("http://[::1]:1", ("::1", 1)), ("http://[ffff::1%eth0]:443", ("ffff::1%eth0", 443)), ("http://[ffff::1%25eth0]:80", ("ffff::1%eth0", 80)), ], ) def test_url_to_address(url, addr): assert irl.URL.parse(url).address() == addr @pytest.mark.parametrize( "url", ["httpq://example.com/", "/google.com", "http+unix://%2Ftmp%2Fdocker.sock"] )
url.address()
def test_unknown_host_or_port_on_address(url): url = irl.URL.parse(url) with pytest.raises(irl.URLError):
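The parametrized cases above effectively specify address(): default ports come from the scheme, an explicit port wins, and bracketed IPv6 hosts keep their zone ID with '%25' decoded to '%'. A hypothetical sketch of those semantics (ValueError stands in for irl.URLError; the real irl implementation may differ):

DEFAULT_PORTS = {"http": 80, "https": 443}

def address(scheme, host, port=None):
    # ValueError here stands in for irl.URLError (assumption).
    if port is None:
        if scheme not in DEFAULT_PORTS:
            raise ValueError("no known port for scheme %r" % scheme)
        port = DEFAULT_PORTS[scheme]
    # IPv6 hosts arrive bracketed; '%25' is the percent-encoded zone-ID separator.
    if host.startswith("[") and host.endswith("]"):
        host = host[1:-1].replace("%25", "%")
    return (host, port)

assert address("http", "example.com") == ("example.com", 80)
assert address("https", "example.com", 1337) == ("example.com", 1337)
assert address("http", "[ffff::1%25eth0]") == ("ffff::1%eth0", 80)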
grammar.rs
// pest. The Elegant Parser // Copyright (c) 2018 Dragoș Tiselice // // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT // license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. All files in the project carrying such notice may not be copied, // modified, or distributed except according to those terms. #[macro_use] extern crate pest; #[macro_use] extern crate pest_derive; #[derive(Parser)] #[grammar = "../tests/grammar.pest"] struct GrammarParser; #[test] fn string() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::string, tokens: [ string(0, 3) ] }; } #[test] fn insensitive() { parses_to! { parser: GrammarParser, input: "aBC", rule: Rule::insensitive, tokens: [ insensitive(0, 3) ] }; } #[test] fn range() { parses_to! { parser: GrammarParser, input: "6", rule: Rule::range, tokens: [ range(0, 1) ] }; } #[test] fn ident() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::ident, tokens: [ ident(0, 3, [ string(0, 3) ]) ] }; } #[test] fn pos_pred() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::pos_pred, tokens: [ pos_pred(0, 0) ] }; } #[test] fn neg_pred() { parses_to! { parser: GrammarParser, input: "", rule: Rule::neg_pred, tokens: [ neg_pred(0, 0) ] }; } #[test] fn double_neg_pred() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::double_neg_pred, tokens: [ double_neg_pred(0, 0) ] }; } #[test] fn sequence() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::sequence, tokens: [ sequence(0, 9, [ string(0, 3), string(6, 9) ]) ] }; } #[test] fn sequence_compound() { parses_to! { parser: GrammarParser, input: "abcabc", rule: Rule::sequence_compound, tokens: [ sequence_compound(0, 6, [ string(0, 3), string(3, 6) ]) ] }; } #[test] fn sequence_atomic() { parses_to! { parser: GrammarParser, input: "abcabc", rule: Rule::sequence_atomic, tokens: [ sequence_atomic(0, 6) ] }; } #[test] fn sequence_non_atomic() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::sequence_non_atomic, tokens: [ sequence_non_atomic(0, 9, [ sequence(0, 9, [ string(0, 3), string(6, 9) ]) ]) ] }; } #[test] #[should_panic] fn sequence_atomic_space() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::sequence_atomic, tokens: [] }; } #[test] fn sequence_atomic_compound() { parses_to! { parser: GrammarParser, input: "abcabc", rule: Rule::sequence_atomic_compound, tokens: [ sequence_atomic_compound(0, 6, [ sequence_compound(0, 6, [ string(0, 3), string(3, 6) ]) ]) ] }; } #[test] fn sequence_compound_nested() { parses_to! { parser: GrammarParser, input: "abcabc", rule: Rule::sequence_compound_nested, tokens: [ sequence_compound_nested(0, 6, [ sequence_nested(0, 6, [ string(0, 3), string(3, 6) ]) ]) ] }; } #[test] #[should_panic] fn sequence_compound_nested_space() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::sequence_compound_nested, tokens: [] }; } #[test] fn choice_string() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::choice, tokens: [ choice(0, 3, [ string(0, 3) ]) ] }; } #[test] fn choice_range() { parses_to! { parser: GrammarParser, input: "0", rule: Rule::choice, tokens: [ choice(0, 1, [ range(0, 1) ]) ] }; } #[test] fn optional_string() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::optional, tokens: [ optional(0, 3, [ string(0, 3) ]) ] }; } #[test] fn optional_empty() { parses_to! 
{ parser: GrammarParser, input: "", rule: Rule::optional, tokens: [ optional(0, 0) ] }; } #[test] fn repeat_empty() { parses_to! { parser: GrammarParser, input: "", rule: Rule::repeat, tokens: [ repeat(0, 0) ] }; } #[test] fn repeat_strings() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat, tokens: [ repeat(0, 9, [ string(0, 3), string(6, 9) ]) ] }; } #[test] fn repeat_atomic_empty() { parses_to! { parser: GrammarParser, input: "", rule: Rule::repeat_atomic, tokens: [ repeat_atomic(0, 0) ] }; } #[test] fn repeat_atomic_strings() { parses_to! { parser: GrammarParser, input: "abcabc", rule: Rule::repeat_atomic, tokens: [ repeat_atomic(0, 6) ] }; } #[test] #[should_panic] fn repeat_atomic_space() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_atomic, tokens: [] }; } #[test] #[should_panic] fn repeat_once_empty() { parses_to! { parser: GrammarParser, input: "", rule: Rule::repeat_once, tokens: [] }; } #[test] fn repeat_once_strings() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_once, tokens: [ repeat_once(0, 9, [ string(0, 3), string(6, 9) ]) ] }; } #[test] #[should_panic] fn repeat_once_atomic_empty() { parses_to! { parser: GrammarParser, input: "", rule: Rule::repeat_once_atomic, tokens: [] }; } #[test] fn repeat_once_atomic_strings() { parses_to! { parser: GrammarParser, input: "abcabc", rule: Rule::repeat_once_atomic, tokens: [ repeat_once_atomic(0, 6) ] }; } #[test] #[should_panic] fn repeat_once_atomic_space() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_once_atomic, tokens: [] }; } #[test] fn repeat_min_max_twice() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_min_max, tokens: [ repeat_min_max(0, 7, [ string(0, 3), string(4, 7) ]) ] }; } #[test] fn repeat_min_max_thrice() { parses_to! { parser: GrammarParser, input: "abc abc abc", rule: Rule::repeat_min_max, tokens: [ repeat_min_max(0, 11, [ string(0, 3), string(4, 7), string(8, 11) ]) ] }; } #[test] fn repeat_min_max_atomic_twice() { parses_to! { parser: GrammarParser, input: "abcabc", rule: Rule::repeat_min_max_atomic, tokens: [ repeat_min_max_atomic(0, 6) ] }; } #[test] fn repeat_min_max_atomic_thrice() { parses_to! { parser: GrammarParser, input: "abcabcabc", rule: Rule::repeat_min_max_atomic, tokens: [ repeat_min_max_atomic(0, 9) ] }; } #[test] #[should_panic] fn repeat_min_max_atomic_space() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_min_max_atomic, tokens: [] }; } #[test] fn r
) { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_exact, tokens: [ repeat_exact(0, 7, [ string(0, 3), string(4, 7) ]) ] }; } #[test] #[should_panic] fn repeat_min_once() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::repeat_min, tokens: [] }; } #[test] fn repeat_min_twice() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_min, tokens: [ repeat_min(0, 7, [ string(0, 3), string(4, 7) ]) ] }; } #[test] fn repeat_min_thrice() { parses_to! { parser: GrammarParser, input: "abc abc abc", rule: Rule::repeat_min, tokens: [ repeat_min(0, 12, [ string(0, 3), string(4, 7), string(9, 12) ]) ] }; } #[test] #[should_panic] fn repeat_min_atomic_once() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::repeat_min_atomic, tokens: [] }; } #[test] fn repeat_min_atomic_twice() { parses_to! { parser: GrammarParser, input: "abcabc", rule: Rule::repeat_min_atomic, tokens: [ repeat_min_atomic(0, 6) ] }; } #[test] fn repeat_min_atomic_thrice() { parses_to! { parser: GrammarParser, input: "abcabcabc", rule: Rule::repeat_min_atomic, tokens: [ repeat_min_atomic(0, 9) ] }; } #[test] #[should_panic] fn repeat_min_atomic_space() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_min_atomic, tokens: [] }; } #[test] fn repeat_max_once() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::repeat_max, tokens: [ repeat_max(0, 3, [ string(0, 3) ]) ] }; } #[test] fn repeat_max_twice() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_max, tokens: [ repeat_max(0, 7, [ string(0, 3), string(4, 7) ]) ] }; } #[test] #[should_panic] fn repeat_max_thrice() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_max, tokens: [] }; } #[test] fn repeat_max_atomic_once() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::repeat_max_atomic, tokens: [ repeat_max_atomic(0, 3) ] }; } #[test] fn repeat_max_atomic_twice() { parses_to! { parser: GrammarParser, input: "abcabc", rule: Rule::repeat_max_atomic, tokens: [ repeat_max_atomic(0, 6) ] }; } #[test] #[should_panic] fn repeat_max_atomic_thrice() { parses_to! { parser: GrammarParser, input: "abcabcabc", rule: Rule::repeat_max_atomic, tokens: [] }; } #[test] #[should_panic] fn repeat_max_atomic_space() { parses_to! { parser: GrammarParser, input: "abc abc", rule: Rule::repeat_max_atomic, tokens: [] }; } #[test] fn repeat_comment() { parses_to! { parser: GrammarParser, input: "abc$$$ $$$abc", rule: Rule::repeat_once, tokens: [ repeat_once(0, 13, [ string(0, 3), string(10, 13) ]) ] }; } #[test] fn soi_at_start() { parses_to! { parser: GrammarParser, input: "abc", rule: Rule::soi_at_start, tokens: [ soi_at_start(0, 3, [ string(0, 3) ]) ] }; } #[test] fn peek() { parses_to! { parser: GrammarParser, input: "0111", rule: Rule::peek_, tokens: [ peek_(0, 4, [ range(0, 1), range(1, 2) ]) ] }; } #[test] fn peek_all() { parses_to! { parser: GrammarParser, input: "0110", rule: Rule::peek_all, tokens: [ peek_all(0, 4, [ range(0, 1), range(1, 2) ]) ] }; } #[test] fn peek_slice_23() { parses_to! { parser: GrammarParser, input: "0123412", rule: Rule::peek_slice_23, tokens: [ peek_slice_23(0, 7, [ range(0, 1), range(1, 2), range(2, 3), range(3, 4), range(4, 5), ]) ] }; } #[test] fn pop() { parses_to! { parser: GrammarParser, input: "0110", rule: Rule::pop_, tokens: [ pop_(0, 4, [ range(0, 1), range(1, 2) ]) ] }; } #[test] fn pop_all() { parses_to! 
{ parser: GrammarParser, input: "0110", rule: Rule::pop_all, tokens: [ pop_all(0, 4, [ range(0, 1), range(1, 2) ]) ] }; } #[test] fn pop_fail() { parses_to! { parser: GrammarParser, input: "010", rule: Rule::pop_fail, tokens: [ pop_fail(0, 3, [ range(0, 1), range(1, 2) ]) ] }; } #[test] fn repeat_mutate_stack() { parses_to! { parser: GrammarParser, input: "a,b,c,cba", rule: Rule::repeat_mutate_stack, tokens: [ repeat_mutate_stack(0, 9) ] }; } #[test] fn checkpoint_restore() { parses_to! { parser: GrammarParser, input: "a", rule: Rule::checkpoint_restore, tokens: [ checkpoint_restore(0, 1, [EOI(1, 1)]) ] }; } #[test] fn ascii_digits() { parses_to! { parser: GrammarParser, input: "6", rule: Rule::ascii_digits, tokens: [ ascii_digits(0, 1) ] }; } #[test] fn ascii_nonzero_digits() { parses_to! { parser: GrammarParser, input: "5", rule: Rule::ascii_nonzero_digits, tokens: [ ascii_nonzero_digits(0, 1) ] }; } #[test] fn ascii_bin_digits() { parses_to! { parser: GrammarParser, input: "1", rule: Rule::ascii_bin_digits, tokens: [ ascii_bin_digits(0, 1) ] }; } #[test] fn ascii_oct_digits() { parses_to! { parser: GrammarParser, input: "3", rule: Rule::ascii_oct_digits, tokens: [ ascii_oct_digits(0, 1) ] }; } #[test] fn ascii_hex_digits() { parses_to! { parser: GrammarParser, input: "6bC", rule: Rule::ascii_hex_digits, tokens: [ ascii_hex_digits(0, 3) ] }; } #[test] fn ascii_alpha_lowers() { parses_to! { parser: GrammarParser, input: "a", rule: Rule::ascii_alpha_lowers, tokens: [ ascii_alpha_lowers(0, 1) ] }; } #[test] fn ascii_alpha_uppers() { parses_to! { parser: GrammarParser, input: "K", rule: Rule::ascii_alpha_uppers, tokens: [ ascii_alpha_uppers(0, 1) ] }; } #[test] fn ascii_alphas() { parses_to! { parser: GrammarParser, input: "wF", rule: Rule::ascii_alphas, tokens: [ ascii_alphas(0, 2) ] }; } #[test] fn ascii_alphanumerics() { parses_to! { parser: GrammarParser, input: "4jU", rule: Rule::ascii_alphanumerics, tokens: [ ascii_alphanumerics(0, 3) ] }; } #[test] fn asciis() { parses_to! { parser: GrammarParser, input: "x02", rule: Rule::asciis, tokens: [ asciis(0, 3) ] }; } #[test] fn newline() { parses_to! { parser: GrammarParser, input: "\n\r\n\r", rule: Rule::newline, tokens: [ newline(0, 4) ] }; } #[test] fn unicode() { parses_to! { parser: GrammarParser, input: "نامهای", rule: Rule::unicode, tokens: [ unicode(0, 12) ] } } #[test] fn shadowing() { parses_to! { parser: GrammarParser, input: "shadows builtin", rule: Rule::SYMBOL, tokens: [ SYMBOL(0, 15) ] } }
epeat_exact(
test__main__.py
"""Main file tests""" from typing import Final from unittest.mock import patch, MagicMock, call class TestHello: """hello function Tests""" class TestNominalCase: @patch('hellopymsdl.service.MessageService.MessageService') @patch('builtins.print') def
( self, print_mock: MagicMock, message_service_mock: MagicMock ) -> None: """When call hello function from main, should print the message service message""" # GIVEN test_message: Final[str] = "My test message" def get_message_mock(file_name: str) -> str: if file_name != "message.txt": raise FileNotFoundError return test_message message_service_mock.return_value.get_message.side_effect = get_message_mock # WHEN from hellopymsdl.__main__ import hello hello() # THEN assert message_service_mock.return_value.get_message.call_count == 1 assert print_mock.call_count == 2 print_mock.assert_has_calls([ call("hello python with Maven Standard Directory Layout"), call(test_message) ])
test_call_hello__should__print_message_from_message_service
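The test pins down the observable behavior of hello(): it prints a fixed greeting, then the message the service reads from message.txt. A minimal sketch consistent with that contract (the MessageService stand-in here is an assumption, not the real hellopymsdl implementation):

class MessageService:
    """Stand-in for hellopymsdl's service; get_message(file_name) is the
    interface the mock above assumes."""

    def get_message(self, file_name: str) -> str:
        # The real service may resolve the file via package resources instead.
        with open(file_name) as f:
            return f.read()


def hello() -> None:
    print("hello python with Maven Standard Directory Layout")
    print(MessageService().get_message("message.txt"))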
rbd.go
package e2e import ( "context" "fmt" "strings" "sync" "github.com/kubernetes-csi/external-snapshotter/v2/pkg/apis/volumesnapshot/v1beta1" . "github.com/onsi/ginkgo" // nolint v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) var ( rbdProvisioner = "csi-rbdplugin-provisioner.yaml" rbdProvisionerRBAC = "csi-provisioner-rbac.yaml" rbdProvisionerPSP = "csi-provisioner-psp.yaml" rbdNodePlugin = "csi-rbdplugin.yaml" rbdNodePluginRBAC = "csi-nodeplugin-rbac.yaml" rbdNodePluginPSP = "csi-nodeplugin-psp.yaml" configMap = "csi-config-map.yaml" rbdDirPath = "../deploy/rbd/kubernetes/" rbdExamplePath = "../examples/rbd/" rbdDeploymentName = "csi-rbdplugin-provisioner" rbdDaemonsetName = "csi-rbdplugin" defaultRBDPool = "replicapool" // Topology related variables nodeRegionLabel = "test.failure-domain/region" regionValue = "testregion" nodeZoneLabel = "test.failure-domain/zone" zoneValue = "testzone" nodeCSIRegionLabel = "topology.rbd.csi.ceph.com/region" nodeCSIZoneLabel = "topology.rbd.csi.ceph.com/zone" rbdTopologyPool = "newrbdpool" rbdTopologyDataPool = "replicapool" // NOTE: should be different than rbdTopologyPool for test to be effective ) func deployRBDPlugin() { // delete objects deployed by rook data, err := replaceNamespaceInTemplate(rbdDirPath + rbdProvisionerRBAC) if err != nil { e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisionerRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-") if err != nil { e2elog.Failf("failed to delete provisioner rbac %s with error %v", rbdDirPath+rbdProvisionerRBAC, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePluginRBAC) if err != nil { e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePluginRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-") if err != nil { e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", rbdDirPath+rbdNodePluginRBAC, err) } createORDeleteRbdResouces("create") } func deleteRBDPlugin() { createORDeleteRbdResouces("delete") } func createORDeleteRbdResouces(action string) { data, err := replaceNamespaceInTemplate(rbdDirPath + rbdProvisioner) if err != nil { e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisioner, err) } data = oneReplicaDeployYaml(data) data = enableTopologyInTemplate(data) _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { e2elog.Failf("failed to %s rbd provisioner with error %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdProvisionerRBAC) if err != nil { e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisionerRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { e2elog.Failf("failed to %s provisioner rbac with error %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdProvisionerPSP) if err != nil { e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisionerPSP, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, "-f", "-") if err != nil { e2elog.Failf("failed to %s provisioner psp with error %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePlugin) if 
err != nil { e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePlugin, err) } domainLabel := nodeRegionLabel + "," + nodeZoneLabel data = addTopologyDomainsToDSYaml(data, domainLabel) _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { e2elog.Failf("failed to %s nodeplugin with error %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePluginRBAC) if err != nil { e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePluginRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { e2elog.Failf("failed to %s nodeplugin rbac with error %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePluginPSP) if err != nil { e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePluginPSP, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { e2elog.Failf("failed to %s nodeplugin psp with error %v", action, err) } } func validateRBDImageCount(f *framework.Framework, count int) { imageList, err := listRBDImages(f) if err != nil { e2elog.Failf("failed to list rbd images with error %v", err) } if len(imageList) != count { e2elog.Failf("backend images not matching kubernetes resource count,image count %d kubernetes resource count %d", len(imageList), count) } } var _ = Describe("RBD", func() { f := framework.NewDefaultFramework("rbd") var c clientset.Interface // deploy RBD CSI BeforeEach(func() { if !testRBD || upgradeTesting { Skip("Skipping RBD E2E") } c = f.ClientSet if deployRBD { err := createNodeLabel(f, nodeRegionLabel, regionValue) if err != nil { e2elog.Failf("failed to create node label with error %v", err) } err = createNodeLabel(f, nodeZoneLabel, zoneValue) if err != nil { e2elog.Failf("failed to create node label with error %v", err) } if cephCSINamespace != defaultNs { err = createNamespace(c, cephCSINamespace) if err != nil { e2elog.Failf("failed to create namespace with error %v", err) } } deployRBDPlugin() } err := createConfigMap(rbdDirPath, f.ClientSet, f) if err != nil { e2elog.Failf("failed to create configmap with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } // create rbd provisioner secret key, err := createCephUser(f, keyringRBDProvisionerUsername, rbdProvisionerCaps("", "")) if err != nil { e2elog.Failf("failed to create user %s with error %v", keyringRBDProvisionerUsername, err) } err = createRBDSecret(f, rbdProvisionerSecretName, keyringRBDProvisionerUsername, key) if err != nil { e2elog.Failf("failed to create provisioner secret with error %v", err) } // create rbd plugin secret key, err = createCephUser(f, keyringRBDNodePluginUsername, rbdNodePluginCaps("", "")) if err != nil { e2elog.Failf("failed to create user %s with error %v", keyringRBDNodePluginUsername, err) } err = createRBDSecret(f, rbdNodePluginSecretName, keyringRBDNodePluginUsername, key) if err != nil { e2elog.Failf("failed to create node secret with error %v", err) } deployVault(f.ClientSet, deployTimeout) }) AfterEach(func() { if !testRBD || upgradeTesting { Skip("Skipping RBD E2E") } if CurrentGinkgoTestDescription().Failed { // log pods created by helm chart logsCSIPods("app=ceph-csi-rbd", c) // log provisoner logsCSIPods("app=csi-rbdplugin-provisioner", c) // log node plugin logsCSIPods("app=csi-rbdplugin", c) // 
log all details from the namespace where Ceph-CSI is deployed framework.DumpAllNamespaceInfo(c, cephCSINamespace) } err := deleteConfigMap(rbdDirPath) if err != nil { e2elog.Failf("failed to delete configmap with error %v", err) } err = c.CoreV1().Secrets(cephCSINamespace).Delete(context.TODO(), rbdProvisionerSecretName, metav1.DeleteOptions{}) if err != nil { e2elog.Failf("failed to delete provisioner secret with error %v", err) } err = c.CoreV1().Secrets(cephCSINamespace).Delete(context.TODO(), rbdNodePluginSecretName, metav1.DeleteOptions{}) if err != nil { e2elog.Failf("failed to delete node secret with error %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } // deleteResource(rbdExamplePath + "snapshotclass.yaml") deleteVault() if deployRBD { deleteRBDPlugin() if cephCSINamespace != defaultNs { err = deleteNamespace(c, cephCSINamespace) if err != nil { e2elog.Failf("failed to delete namespace with error %v", err) } } } err = deleteNodeLabel(c, nodeRegionLabel) if err != nil { e2elog.Failf("failed to delete node label with error %v", err) } err = deleteNodeLabel(c, nodeZoneLabel) if err != nil { e2elog.Failf("failed to delete node label with error %v", err) } // Remove the CSI labels that get added err = deleteNodeLabel(c, nodeCSIRegionLabel) if err != nil { e2elog.Failf("failed to delete node label with error %v", err) } err = deleteNodeLabel(c, nodeCSIZoneLabel) if err != nil { e2elog.Failf("failed to delete node label with error %v", err) } }) Context("Test RBD CSI", func() { It("Test RBD CSI", func() { pvcPath := rbdExamplePath + "pvc.yaml" appPath := rbdExamplePath + "pod.yaml" rawPvcPath := rbdExamplePath + "raw-block-pvc.yaml" rawAppPath := rbdExamplePath + "raw-block-pod.yaml" pvcClonePath := rbdExamplePath + "pvc-restore.yaml" pvcSmartClonePath := rbdExamplePath + "pvc-clone.yaml" pvcBlockSmartClonePath := rbdExamplePath + "pvc-block-clone.yaml" appClonePath := rbdExamplePath + "pod-restore.yaml" appSmartClonePath := rbdExamplePath + "pod-clone.yaml" appBlockSmartClonePath := rbdExamplePath + "block-pod-clone.yaml" snapshotPath := rbdExamplePath + "snapshot.yaml" By("checking provisioner deployment is running", func() { err := waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err) } }) By("checking nodeplugin deamonset pods are running", func() { err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err) } }) By("create a PVC and validate owner", func() { err := validateImageOwner(pvcPath, f) if err != nil { e2elog.Failf("failed to validate owner of pvc with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) }) By("create a PVC and bind it to an app", func() { err := validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { e2elog.Failf("failed to validate pvc and application binding with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) }) By("create a PVC and bind it to an app with normal user", func() { err := validateNormalUserPVCAccess(pvcPath, f) if err != nil { e2elog.Failf("failed to validate normal user pvc and application binding with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) }) 
By("create a PVC and bind it to an app with ext4 as the FS ", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"csi.storage.k8s.io/fstype": "ext4"}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { e2elog.Failf("failed to validate pvc and application binding with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } }) By("create a PVC and bind it to an app with encrypted RBD volume", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"encrypted": "true"}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, "", f) if err != nil { e2elog.Failf("failed to validate encrypted pvc with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } }) By("create a PVC and bind it to an app with encrypted RBD volume with VaultKMS", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", } err = createRBDStorageClass(f.ClientSet, f, nil, scOpts, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, "vault", f) if err != nil { e2elog.Failf("failed to validate encrypted pvc with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } }) By("create a PVC and bind it to an app with encrypted RBD volume with VaultTokensKMS", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-tokens-test", } err = createRBDStorageClass(f.ClientSet, f, nil, scOpts, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } // name(space) of the Tenant tenant := f.UniqueName // create the Secret with Vault Token in the Tenants namespace token, err := getSecret(vaultExamplePath + "tenant-token.yaml") if err != nil { e2elog.Failf("failed to load 
tenant token from secret: %v", err) } _, err = c.CoreV1().Secrets(tenant).Create(context.TODO(), &token, metav1.CreateOptions{}) if err != nil { e2elog.Failf("failed to create Secret with tenant token: %v", err) } err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, "vaulttokens", f) if err != nil { e2elog.Failf("failed to validate encrypted pvc with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0)
if err != nil { e2elog.Failf("failed to delete Secret with tenant token: %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } }) By("create a PVC and bind it to an app with encrypted RBD volume with SecretsMetadataKMS", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "secrets-metadata-test", } err = createRBDStorageClass(f.ClientSet, f, nil, scOpts, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, "", f) if err != nil { e2elog.Failf("failed to validate encrypted pvc with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } }) By("create a PVC clone and bind it to an app", func() { // snapshot beta is only supported from v1.17+ if k8sVersionGreaterEquals(f.ClientSet, 1, 17) { var wg sync.WaitGroup totalCount := 10 wgErrs := make([]error, totalCount) chErrs := make([]error, totalCount) wg.Add(totalCount) err := createRBDSnapshotClass(f) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } pvc, err := loadPVC(pvcPath) if err != nil { e2elog.Failf("failed to load PVC with error %v", err) } label := make(map[string]string) pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC with error %v", err) } app, err := loadApp(appPath) if err != nil { e2elog.Failf("failed to load app with error %v", err) } // write data in PVC label[appKey] = appLabel app.Namespace = f.UniqueName app.Labels = label opt := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), } app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name checkSum, err := writeDataAndCalChecksum(app, &opt, f) if err != nil { e2elog.Failf("failed to calculate checksum with error %v", err) } validateRBDImageCount(f, 1) snap := getSnapshot(snapshotPath) snap.Namespace = f.UniqueName snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name // create snapshot for i := 0; i < totalCount; i++ { go func(w *sync.WaitGroup, n int, s v1beta1.VolumeSnapshot) { s.Name = fmt.Sprintf("%s%d", f.UniqueName, n) wgErrs[n] = createSnapshot(&s, deployTimeout) w.Done() }(&wg, i, snap) } wg.Wait() failed := 0 for i, err := range wgErrs { if err != nil { // not using Failf() as it aborts the test and does not log other errors e2elog.Logf("failed to create snapshot (%s%d): %v", f.UniqueName, i, err) failed++ } } if failed != 0 { e2elog.Failf("creating snapshots failed, %d errors were logged", failed) } // total images in cluster is 1 parent rbd image+ total snaps validateRBDImageCount(f, totalCount+1) pvcClone, err := loadPVC(pvcClonePath) if err != nil { e2elog.Failf("failed to load PVC with error %v", err) } appClone, err := loadApp(appClonePath) if err != nil { 
e2elog.Failf("failed to load application with error %v", err) } pvcClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName pvcClone.Spec.DataSource.Name = fmt.Sprintf("%s%d", f.UniqueName, 0) // create multiple PVC from same snapshot wg.Add(totalCount) for i := 0; i < totalCount; i++ { go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { name := fmt.Sprintf("%s%d", f.UniqueName, n) label := make(map[string]string) label[appKey] = name a.Labels = label opt := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), } wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout) if wgErrs[n] == nil { filePath := a.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" checkSumClone := "" e2elog.Logf("calculating checksum clone for filepath %s", filePath) checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt) e2elog.Logf("checksum value for the clone is %s with pod name %s", checkSumClone, name) if chErrs[n] != nil { e2elog.Logf("failed to calculte checksum for clone with error %s", chErrs[n]) } if checkSumClone != checkSum { e2elog.Logf("checksum value didn't match. checksum=%s and checksumclone=%s", checkSum, checkSumClone) } } w.Done() }(&wg, i, *pvcClone, *appClone) } wg.Wait() for i, err := range wgErrs { if err != nil { // not using Failf() as it aborts the test and does not log other errors e2elog.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err) failed++ } } if failed != 0 { e2elog.Failf("creating PVCs and applications failed, %d errors were logged", failed) } for i, err := range chErrs { if err != nil { // not using Failf() as it aborts the test and does not log other errors e2elog.Logf("failed to calculate checksum (%s%d): %v", f.UniqueName, i, err) failed++ } } if failed != 0 { e2elog.Failf("calculating checksum failed, %d errors were logged", failed) } // total images in cluster is 1 parent rbd image+ total // snaps+ total clones totalCloneCount := totalCount + totalCount + 1 validateRBDImageCount(f, totalCloneCount) wg.Add(totalCount) // delete clone and app for i := 0; i < totalCount; i++ { go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { name := fmt.Sprintf("%s%d", f.UniqueName, n) p.Spec.DataSource.Name = name wgErrs[n] = deletePVCAndApp(name, f, &p, &a) w.Done() }(&wg, i, *pvcClone, *appClone) } wg.Wait() for i, err := range wgErrs { if err != nil { // not using Failf() as it aborts the test and does not log other errors e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err) failed++ } } if failed != 0 { e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed) } // total images in cluster is 1 parent rbd image+ total // snaps validateRBDImageCount(f, totalCount+1) // create clones from different snapshosts and bind it to an // app wg.Add(totalCount) for i := 0; i < totalCount; i++ { go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { name := fmt.Sprintf("%s%d", f.UniqueName, n) p.Spec.DataSource.Name = name wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout) w.Done() }(&wg, i, *pvcClone, *appClone) } wg.Wait() for i, err := range wgErrs { if err != nil { // not using Failf() as it aborts the test and does not log other errors e2elog.Logf("failed to create PVC and application (%s%d): %v", f.UniqueName, i, err) failed++ } } if failed != 0 { e2elog.Failf("creating PVCs and applications failed, %d errors were logged", failed) } // total images in cluster is 1 parent rbd image+ 
total // snaps+ total clones totalCloneCount = totalCount + totalCount + 1 validateRBDImageCount(f, totalCloneCount) // delete parent pvc err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { e2elog.Failf("failed to delete PVC with error %v", err) } // total images in cluster is total snaps+ total clones totalSnapCount := totalCount + totalCount validateRBDImageCount(f, totalSnapCount) wg.Add(totalCount) // delete snapshot for i := 0; i < totalCount; i++ { go func(w *sync.WaitGroup, n int, s v1beta1.VolumeSnapshot) { s.Name = fmt.Sprintf("%s%d", f.UniqueName, n) wgErrs[n] = deleteSnapshot(&s, deployTimeout) w.Done() }(&wg, i, snap) } wg.Wait() for i, err := range wgErrs { if err != nil { // not using Failf() as it aborts the test and does not log other errors e2elog.Logf("failed to delete snapshot (%s%d): %v", f.UniqueName, i, err) failed++ } } if failed != 0 { e2elog.Failf("deleting snapshots failed, %d errors were logged", failed) } validateRBDImageCount(f, totalCount) wg.Add(totalCount) // delete clone and app for i := 0; i < totalCount; i++ { go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { name := fmt.Sprintf("%s%d", f.UniqueName, n) p.Spec.DataSource.Name = name wgErrs[n] = deletePVCAndApp(name, f, &p, &a) w.Done() }(&wg, i, *pvcClone, *appClone) } wg.Wait() for i, err := range wgErrs { if err != nil { // not using Failf() as it aborts the test and does not log other errors e2elog.Logf("failed to delete PVC and application (%s%d): %v", f.UniqueName, i, err) failed++ } } if failed != 0 { e2elog.Failf("deleting PVCs and applications failed, %d errors were logged", failed) } // validate created backend rbd images validateRBDImageCount(f, 0) } }) By("create a PVC-PVC clone and bind it to an app", func() { // pvc clone is only supported from v1.16+ if k8sVersionGreaterEquals(f.ClientSet, 1, 16) { validatePVCClone(pvcPath, appPath, pvcSmartClonePath, appSmartClonePath, f) } }) By("create a block type PVC and bind it to an app", func() { err := validatePVCAndAppBinding(rawPvcPath, rawAppPath, f) if err != nil { e2elog.Failf("failed to validate pvc and application binding with error %v", err) } }) By("create a Block mode PVC-PVC clone and bind it to an app", func() { v, err := f.ClientSet.Discovery().ServerVersion() if err != nil { e2elog.Failf("failed to get server version with error %v", err) } // pvc clone is only supported from v1.16+ if v.Major > "1" || (v.Major == "1" && v.Minor >= "16") { validatePVCClone(rawPvcPath, rawAppPath, pvcBlockSmartClonePath, appBlockSmartClonePath, f) } }) By("create/delete multiple PVCs and Apps", func() { totalCount := 2 pvc, err := loadPVC(pvcPath) if err != nil { e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName // create PVC and app for i := 0; i < totalCount; i++ { name := fmt.Sprintf("%s%d", f.UniqueName, i) err := createPVCAndApp(name, f, pvc, app, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC and application with error %v", err) } } // validate created backend rbd images validateRBDImageCount(f, totalCount) // delete PVC and app for i := 0; i < totalCount; i++ { name := fmt.Sprintf("%s%d", f.UniqueName, i) err := deletePVCAndApp(name, f, pvc, app) if err != nil { e2elog.Failf("failed to delete PVC and application with error %v", err) } } // validate created backend rbd images validateRBDImageCount(f, 0) }) 
By("check data persist after recreating pod", func() { err := checkDataPersist(pvcPath, appPath, f) if err != nil { e2elog.Failf("failed to check data persist with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) }) By("Resize Filesystem PVC and check application directory size", func() { // Resize 0.3.0 is only supported from v1.15+ if k8sVersionGreaterEquals(f.ClientSet, 1, 15) { err := resizePVCAndValidateSize(pvcPath, appPath, f) if err != nil { e2elog.Failf("failed to resize filesystem PVC %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"csi.storage.k8s.io/fstype": "xfs"}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } err = resizePVCAndValidateSize(pvcPath, appPath, f) if err != nil { e2elog.Failf("failed to resize filesystem PVC with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) } }) By("Resize Block PVC and check Device size", func() { // Block PVC resize is supported in kubernetes 1.16+ if k8sVersionGreaterEquals(f.ClientSet, 1, 16) { err := resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) if err != nil { e2elog.Failf("failed to resize block PVC with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) } }) By("Test unmount after nodeplugin restart", func() { pvc, err := loadPVC(pvcPath) if err != nil { e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC and application with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1) // delete rbd nodeplugin pods err = deletePodWithLabel("app=csi-rbdplugin", cephCSINamespace, false) if err != nil { e2elog.Failf("fail to delete pod with error %v", err) } // wait for nodeplugin pods to come up err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { e2elog.Failf("timeout waiting for daemonset pods with error %v", err) } err = deletePVCAndApp("", f, pvc, app) if err != nil { e2elog.Failf("failed to delete PVC and application with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) }) By("create PVC in storageClass with volumeNamePrefix", func() { volumeNamePrefix := "foo-bar-" err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"volumeNamePrefix": volumeNamePrefix}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } // set up PVC pvc, err := loadPVC(pvcPath) if err != nil { e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1) // list RBD images and check if one of them has the same prefix foundIt := false images, err := listRBDImages(f) if err != nil { e2elog.Failf("failed to list rbd images with error 
%v", err) } for _, imgName := range images { fmt.Printf("Checking prefix on %s\n", imgName) if strings.HasPrefix(imgName, volumeNamePrefix) { foundIt = true break } } // clean up after ourselves err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { e2elog.Failf("failed to delete PVC with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } if !foundIt { e2elog.Failf("could not find image with prefix %s", volumeNamePrefix) } }) By("validate RBD static FileSystem PVC", func() { err := validateRBDStaticPV(f, appPath, false) if err != nil { e2elog.Failf("failed to validate rbd static pv with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) }) By("validate RBD static Block PVC", func() { err := validateRBDStaticPV(f, rawAppPath, true) if err != nil { e2elog.Failf("failed to validate rbd block pv with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) }) By("validate mount options in app pod", func() { mountFlags := []string{"discard"} err := checkMountOptions(pvcPath, appPath, f, mountFlags) if err != nil { e2elog.Failf("failed to check mount options with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) }) By("creating an app with a PVC, using a topology constrained StorageClass", func() { By("checking node has required CSI topology labels set", func() { err := checkNodeHasLabel(f.ClientSet, nodeCSIRegionLabel, regionValue) if err != nil { e2elog.Failf("failed to check node label with error %v", err) } err = checkNodeHasLabel(f.ClientSet, nodeCSIZoneLabel, zoneValue) if err != nil { e2elog.Failf("failed to check node label with error %v", err) } }) By("creating a StorageClass with delayed binding mode and CSI topology parameter") err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } topologyConstraint := "[{\"poolName\":\"" + rbdTopologyPool + "\",\"domainSegments\":" + "[{\"domainLabel\":\"region\",\"value\":\"" + regionValue + "\"}," + "{\"domainLabel\":\"zone\",\"value\":\"" + zoneValue + "\"}]}]" err = createRBDStorageClass(f.ClientSet, f, map[string]string{"volumeBindingMode": "WaitForFirstConsumer"}, map[string]string{"topologyConstrainedPools": topologyConstraint}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } By("creating an app using a PV from the delayed binding mode StorageClass") pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, 0) if err != nil { e2elog.Failf("failed to create PVC and application with error %v", err) } By("ensuring created PV has required node selector values populated") err = checkPVSelectorValuesForPVC(f, pvc) if err != nil { e2elog.Failf("failed to check pv selector values with error %v", err) } By("ensuring created PV has its image in the topology specific pool") err = checkPVCImageInPool(f, pvc, rbdTopologyPool) if err != nil { e2elog.Failf("failed to check image in pool with error %v", err) } By("ensuring created PV has its image journal in the topology specific pool") err = checkPVCImageJournalInPool(f, pvc, rbdTopologyPool) if err != nil { e2elog.Failf("failed to 
check image journal with error %v", err) } By("ensuring created PV has its CSI journal in the CSI journal specific pool") err = checkPVCCSIJournalInPool(f, pvc, "replicapool") if err != nil { e2elog.Failf("failed to check csi journal in pool with error %v", err) } err = deletePVCAndApp("", f, pvc, app) if err != nil { e2elog.Failf("failed to delete PVC and application with error %v", err) } By("checking if data pool parameter is honored", func() { err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } topologyConstraint := "[{\"poolName\":\"" + rbdTopologyPool + "\",\"dataPool\":\"" + rbdTopologyDataPool + "\",\"domainSegments\":" + "[{\"domainLabel\":\"region\",\"value\":\"" + regionValue + "\"}," + "{\"domainLabel\":\"zone\",\"value\":\"" + zoneValue + "\"}]}]" err = createRBDStorageClass(f.ClientSet, f, map[string]string{"volumeBindingMode": "WaitForFirstConsumer"}, map[string]string{"topologyConstrainedPools": topologyConstraint}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } By("creating an app using a PV from the delayed binding mode StorageClass with a data pool") pvc, app, err = createPVCAndAppBinding(pvcPath, appPath, f, 0) if err != nil { e2elog.Failf("failed to create PVC and application with error %v", err) } By("ensuring created PV has its image in the topology specific pool") err = checkPVCImageInPool(f, pvc, rbdTopologyPool) if err != nil { e2elog.Failf("failed to check pvc image in pool with error %v", err) } By("ensuring created image has the right data pool parameter set") err = checkPVCDataPoolForImageInPool(f, pvc, rbdTopologyPool, rbdTopologyDataPool) if err != nil { e2elog.Failf("failed to check data pool for image with error %v", err) } // cleanup and undo changes made by the test err = deletePVCAndApp("", f, pvc, app) if err != nil { e2elog.Failf("failed to delete PVC and application with error %v", err) } }) // cleanup and undo changes made by the test err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } }) // Mount pvc to pod with invalid mount option, expected that // mounting will fail By("Mount pvc to pod with invalid mount option", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, map[string]string{rbdmountOptions: "debug,invalidOption"}, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } pvc, err := loadPVC(pvcPath) if err != nil { e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1) // create an app and wait for 1 min for it to go to running state err = createApp(f.ClientSet, app, 1) if err == nil { e2elog.Failf("application should not go to running state due to invalid mount option") } err = deletePVCAndApp("", f, pvc, app) if
err != nil { e2elog.Failf("failed to delete PVC and application with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } }) By("create ROX PVC clone and mount it to multiple pods", func() { // snapshot beta is only supported from v1.17+ if k8sVersionGreaterEquals(f.ClientSet, 1, 17) { // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC and application with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1) // delete pod as we should not create snapshot for in-use pvc err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { e2elog.Failf("failed to delete application with error %v", err) } snap := getSnapshot(snapshotPath) snap.Namespace = f.UniqueName snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name err = createSnapshot(&snap, deployTimeout) if err != nil { e2elog.Failf("failed to create snapshot with error %v", err) } // validate created backend rbd images // parent PVC + snapshot totalImages := 2 validateRBDImageCount(f, totalImages) pvcClone, err := loadPVC(pvcClonePath) if err != nil { e2elog.Failf("failed to load PVC with error %v", err) } // create clone PVC as ROX pvcClone.Namespace = f.UniqueName pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany} err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC with error %v", err) } // validate created backend rbd images // parent pvc+ snapshot + clone totalImages = 3 validateRBDImageCount(f, totalImages) appClone, err := loadApp(appClonePath) if err != nil { e2elog.Failf("failed to load application with error %v", err) } totalCount := 2 appClone.Namespace = f.UniqueName appClone.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name // create PVC and app for i := 0; i < totalCount; i++ { name := fmt.Sprintf("%s%d", f.UniqueName, i) label := map[string]string{ "app": name, } appClone.Labels = label appClone.Name = name err = createApp(f.ClientSet, appClone, deployTimeout) if err != nil { e2elog.Failf("failed to create application with error %v", err) } } for i := 0; i < totalCount; i++ { name := fmt.Sprintf("%s%d", f.UniqueName, i) opt := metav1.ListOptions{ LabelSelector: fmt.Sprintf("app=%s", name), } filePath := appClone.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" _, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), appClone.Namespace, &opt) readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath) if !strings.Contains(stdErr, readOnlyErr) { e2elog.Failf(stdErr) } } // delete app for i := 0; i < totalCount; i++ { name := fmt.Sprintf("%s%d", f.UniqueName, i) appClone.Name = name err = deletePod(appClone.Name, appClone.Namespace, f.ClientSet, deployTimeout) if err != nil { e2elog.Failf("failed to delete application with error %v", err) 
} } // delete PVC clone err = deletePVCAndValidatePV(f.ClientSet, pvcClone, deployTimeout) if err != nil { e2elog.Failf("failed to delete PVC with error %v", err) } // delete snapshot err = deleteSnapshot(&snap, deployTimeout) if err != nil { e2elog.Failf("failed to delete snapshot with error %v", err) } // delete parent pvc err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { e2elog.Failf("failed to delete PVC with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) } }) By("ensuring all operations will work within a rados namespace", func() { updateConfigMap := func(radosNS string) { radosNamespace = radosNS err := deleteConfigMap(rbdDirPath) if err != nil { e2elog.Failf("failed to delete configmap with Error: %v", err) } err = createConfigMap(rbdDirPath, f.ClientSet, f) if err != nil { e2elog.Failf("failed to create configmap with error %v", err) } err = createRadosNamespace(f) if err != nil { e2elog.Failf("failed to create rados namespace with error %v", err) } // delete csi pods err = deletePodWithLabel("app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)", cephCSINamespace, false) if err != nil { e2elog.Failf("failed to delete pods with labels with error %v", err) } // wait for csi pods to come up err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { e2elog.Failf("timeout waiting for daemonset pods with error %v", err) } err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { e2elog.Failf("timeout waiting for deployment to be in running state with error %v", err) } } updateConfigMap("e2e-ns") // create rbd provisioner secret key, err := createCephUser(f, keyringRBDNamespaceProvisionerUsername, rbdProvisionerCaps(defaultRBDPool, radosNamespace)) if err != nil { e2elog.Failf("failed to create user %s with error %v", keyringRBDNamespaceProvisionerUsername, err) } err = createRBDSecret(f, rbdNamespaceProvisionerSecretName, keyringRBDNamespaceProvisionerUsername, key) if err != nil { e2elog.Failf("failed to create provisioner secret with error %v", err) } // create rbd plugin secret key, err = createCephUser(f, keyringRBDNamespaceNodePluginUsername, rbdNodePluginCaps(defaultRBDPool, radosNamespace)) if err != nil { e2elog.Failf("failed to create user %s with error %v", keyringRBDNamespaceNodePluginUsername, err) } err = createRBDSecret(f, rbdNamespaceNodePluginSecretName, keyringRBDNamespaceNodePluginUsername, key) if err != nil { e2elog.Failf("failed to create node secret with error %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } param := make(map[string]string) // override existing secrets param["csi.storage.k8s.io/provisioner-secret-namespace"] = cephCSINamespace param["csi.storage.k8s.io/provisioner-secret-name"] = rbdProvisionerSecretName param["csi.storage.k8s.io/controller-expand-secret-namespace"] = cephCSINamespace param["csi.storage.k8s.io/controller-expand-secret-name"] = rbdProvisionerSecretName param["csi.storage.k8s.io/node-stage-secret-namespace"] = cephCSINamespace param["csi.storage.k8s.io/node-stage-secret-name"] = rbdNodePluginSecretName err = createRBDStorageClass(f.ClientSet, f, nil, param, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } err = validateImageOwner(pvcPath, f) if err != nil { e2elog.Failf("failed to validate owner of pvc with 
error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) // Create a PVC and bind it to an app within the namesapce err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { e2elog.Failf("failed to validate pvc and application binding with error %v", err) } // Resize Block PVC and check Device size within the namespace // Block PVC resize is supported in kubernetes 1.16+ if k8sVersionGreaterEquals(f.ClientSet, 1, 16) { err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) if err != nil { e2elog.Failf("failed to resize block PVC with error %v", err) } } // Resize Filesystem PVC and check application directory size // Resize 0.3.0 is only supported from v1.15+ if k8sVersionGreaterEquals(f.ClientSet, 1, 15) { err = resizePVCAndValidateSize(pvcPath, appPath, f) if err != nil { e2elog.Failf("failed to resize filesystem PVC %v", err) } } // Create a PVC clone and bind it to an app within the namespace // snapshot beta is only supported from v1.17+ if k8sVersionGreaterEquals(f.ClientSet, 1, 17) { var pvc = &v1.PersistentVolumeClaim{} pvc, err = loadPVC(pvcPath) if err != nil { e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1) snap := getSnapshot(snapshotPath) snap.Namespace = f.UniqueName snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name err = createSnapshot(&snap, deployTimeout) if err != nil { e2elog.Failf("failed to create snapshot with error %v", err) } validateRBDImageCount(f, 2) err = validatePVCAndAppBinding(pvcClonePath, appClonePath, f) if err != nil { e2elog.Failf("failed to validate pvc and application binding with error %v", err) } err = deleteSnapshot(&snap, deployTimeout) if err != nil { e2elog.Failf("failed to delete snapshot with error %v", err) } // as snapshot is deleted the image count should be one validateRBDImageCount(f, 1) err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { e2elog.Failf("failed to delete PVC with error %v", err) } validateRBDImageCount(f, 0) } // delete RBD provisioner secret err = deleteCephUser(f, keyringRBDNamespaceProvisionerUsername) if err != nil { e2elog.Failf("failed to delete user %s with error %v", keyringRBDNamespaceProvisionerUsername, err) } err = c.CoreV1().Secrets(cephCSINamespace).Delete(context.TODO(), rbdNamespaceProvisionerSecretName, metav1.DeleteOptions{}) if err != nil { e2elog.Failf("failed to delete provisioner secret with error %v", err) } // delete RBD plugin secret err = deleteCephUser(f, keyringRBDNamespaceNodePluginUsername) if err != nil { e2elog.Failf("failed to delete user %s with error %v", keyringRBDNamespaceNodePluginUsername, err) } err = c.CoreV1().Secrets(cephCSINamespace).Delete(context.TODO(), rbdNamespaceNodePluginSecretName, metav1.DeleteOptions{}) if err != nil { e2elog.Failf("failed to delete node secret with error %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } updateConfigMap("") }) By("Mount pvc as readonly in pod", func() { // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { e2elog.Failf("failed to load PVC with error %v", 
err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName label := map[string]string{ "app": app.Name, } app.Labels = label app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name app.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { e2elog.Failf("failed to create PVC and application with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 1) opt := metav1.ListOptions{ LabelSelector: fmt.Sprintf("app=%s", app.Name), } filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" _, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), app.Namespace, &opt) readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath) if !strings.Contains(stdErr, readOnlyErr) { e2elog.Failf(stdErr) } // delete PVC and app err = deletePVCAndApp("", f, pvc, app) if err != nil { e2elog.Failf("failed to delete PVC and application with error %v", err) } // validate created backend rbd images validateRBDImageCount(f, 0) }) By("create a thick-provisioned PVC", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{ "thickProvision": "true"}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } pvc, err := loadPVC(rawPvcPath) if err != nil { e2elog.Failf("failed to load PVC with error: %v", err) } pvcSizes := []string{ // original value from the yaml file (100MB) "100Mi", // half the size (50MB), is not stripe-size roundable "50Mi", } for _, pvcSize := range pvcSizes { err = validateThickPVC(f, pvc, pvcSize) if err != nil { e2elog.Failf("validating thick-provisioning failed: %v", err) } } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } }) By("create a PVC and Bind it to an app for mapped rbd image with options", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{ "mapOptions": "lock_on_read,queue_depth=1024", "unmapOptions": "force"}, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } err = validatePVCAndAppBinding(pvcPath, appPath, f) if err != nil { e2elog.Failf("failed to validate pvc and application binding with error %v", err) } err = deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } }) By("validate the functionality of controller", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { e2elog.Failf("failed to delete storageclass with error %v", err) } err = validateController(f, pvcPath, appPath, rbdExamplePath+"storageclass.yaml") if err != nil { e2elog.Failf("failed to validate controller with error %v", 
err) } // validate created backend rbd images validateRBDImageCount(f, 0) err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) if err != nil { e2elog.Failf("failed to create storageclass with error %v", err) } }) // Make sure this should be last testcase in this file, because // it deletes pool By("Create a PVC and delete PVC when backend pool deleted", func() { err := pvcDeleteWhenPoolNotFound(pvcPath, false, f) if err != nil { e2elog.Failf("failed to delete PVC when pool not found with error %v", err) } }) // delete RBD provisioner secret err := deleteCephUser(f, keyringRBDProvisionerUsername) if err != nil { e2elog.Failf("failed to delete user %s with error %v", keyringRBDProvisionerUsername, err) } // delete RBD plugin secret err = deleteCephUser(f, keyringRBDNodePluginUsername) if err != nil { e2elog.Failf("failed to delete user %s with error %v", keyringRBDNodePluginUsername, err) } }) }) })
// delete the Secret of the Tenant err = c.CoreV1().Secrets(tenant).Delete(context.TODO(), token.Name, metav1.DeleteOptions{})
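The concurrent snapshot and clone tests above fan goroutines out over a preallocated error slice (wgErrs/chErrs) so every failure can be logged before the test aborts. A minimal, self-contained sketch of that pattern follows; the function and variable names here are illustrative, not taken from the test suite.

package main

import (
	"fmt"
	"sync"
)

// doWork stands in for calls like createSnapshot or createPVCAndApp.
func doWork(n int) error {
	if n%3 == 0 {
		return fmt.Errorf("work item %d failed", n)
	}
	return nil
}

func main() {
	const totalCount = 10
	var wg sync.WaitGroup
	// One slot per goroutine: each worker writes only its own index,
	// so no mutex is needed and no error is lost.
	wgErrs := make([]error, totalCount)

	wg.Add(totalCount)
	for i := 0; i < totalCount; i++ {
		go func(n int) {
			defer wg.Done()
			wgErrs[n] = doWork(n)
		}(i)
	}
	wg.Wait()

	// Log every failure first, then fail once, mirroring the tests'
	// "not using Failf() as it aborts the test" comment.
	failed := 0
	for i, err := range wgErrs {
		if err != nil {
			fmt.Printf("work item %d: %v\n", i, err)
			failed++
		}
	}
	if failed != 0 {
		fmt.Printf("%d errors were logged\n", failed)
	}
}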
deployment.go
// Copyright 2016-2018, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package engine import ( "context" "time" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/pulumi/pulumi/pkg/v2/resource/deploy" "github.com/pulumi/pulumi/pkg/v2/resource/deploy/providers" "github.com/pulumi/pulumi/sdk/v2/go/common/diag" "github.com/pulumi/pulumi/sdk/v2/go/common/resource" "github.com/pulumi/pulumi/sdk/v2/go/common/resource/plugin" "github.com/pulumi/pulumi/sdk/v2/go/common/util/contract" "github.com/pulumi/pulumi/sdk/v2/go/common/util/fsutil" "github.com/pulumi/pulumi/sdk/v2/go/common/util/result" "github.com/pulumi/pulumi/sdk/v2/go/common/workspace" ) const clientRuntimeName = "client" // ProjectInfoContext returns information about the current project, including its pwd, main, and plugin context. func ProjectInfoContext(projinfo *Projinfo, host plugin.Host, config plugin.ConfigSource, diag, statusDiag diag.Sink, disableProviderPreview bool, tracingSpan opentracing.Span) (string, string, *plugin.Context, error) { contract.Require(projinfo != nil, "projinfo") // If the package contains an override for the main entrypoint, use it. pwd, main, err := projinfo.GetPwdMain() if err != nil { return "", "", nil, err } // Create a context for plugins. ctx, err := plugin.NewContext(diag, statusDiag, host, config, pwd, projinfo.Proj.Runtime.Options(), disableProviderPreview, tracingSpan) if err != nil { return "", "", nil, err } // If the project wants to connect to an existing language runtime, do so now. if projinfo.Proj.Runtime.Name() == clientRuntimeName { addressValue, ok := projinfo.Proj.Runtime.Options()["address"] if !ok { return "", "", nil, errors.New("missing address of language runtime service") } address, ok := addressValue.(string) if !ok { return "", "", nil, errors.New("address of language runtime service must be a string") } host, err := connectToLanguageRuntime(ctx, address) if err != nil { return "", "", nil, err } ctx.Host = host } return pwd, main, ctx, nil } // newDeploymentContext creates a context for a subsequent deployment. Callers must call Close on the context after the // associated deployment completes.
func newDeploymentContext(u UpdateInfo, opName string, parentSpan opentracing.SpanContext) (*deploymentContext, error) { contract.Require(u != nil, "u") // Create a root span for the operation opts := []opentracing.StartSpanOption{} if opName != "" { opts = append(opts, opentracing.Tag{Key: "operation", Value: opName}) } if parentSpan != nil { opts = append(opts, opentracing.ChildOf(parentSpan)) } tracingSpan := opentracing.StartSpan("pulumi-plan", opts...) return &deploymentContext{ Update: u, TracingSpan: tracingSpan, }, nil } type deploymentContext struct { Update UpdateInfo // The update being processed. TracingSpan opentracing.Span // An OpenTracing span to parent deployment operations within. } func (ctx *deploymentContext) Close() { ctx.TracingSpan.Finish() } // deploymentOptions includes a full suite of options for performing a deployment. type deploymentOptions struct { UpdateOptions // SourceFunc is a factory that returns an EvalSource to use during deployment. This is the thing that // creates resources to compare against the current checkpoint state (e.g., by evaluating a program, etc). SourceFunc deploymentSourceFunc DOT bool // true if we should print the DOT file for this deployment. Events eventEmitter // the channel to write events from the engine to. Diag diag.Sink // the sink to use for diag'ing. StatusDiag diag.Sink // the sink to use for diag'ing status messages. isImport bool // True if this is an import. imports []deploy.Import // Resources to import, if this is an import. // true if we're executing a refresh. isRefresh bool // true if we should trust the dependency graph reported by the language host. Not all Pulumi-supported languages // correctly report their dependencies, in which case this will be false. trustDependencies bool } // deploymentSourceFunc is a callback that will be used to prepare for, and evaluate, the "new" state for a stack. type deploymentSourceFunc func( client deploy.BackendClient, opts deploymentOptions, proj *workspace.Project, pwd, main string, target *deploy.Target, plugctx *plugin.Context, dryRun bool) (deploy.Source, error) // newDeployment creates a new deployment with the given context and options. func newDeployment(ctx *Context, info *deploymentContext, opts deploymentOptions, dryRun bool) (*deployment, error) { contract.Assert(info != nil) contract.Assert(info.Update != nil) contract.Assert(opts.SourceFunc != nil) // First, load the package metadata and the deployment target in preparation for executing the package's program // and creating resources. This includes fetching its pwd and main overrides. proj, target := info.Update.GetProject(), info.Update.GetTarget() contract.Assert(proj != nil) contract.Assert(target != nil) projinfo := &Projinfo{Proj: proj, Root: info.Update.GetRoot()} pwd, main, plugctx, err := ProjectInfoContext(projinfo, opts.Host, target, opts.Diag, opts.StatusDiag, opts.DisableProviderPreview, info.TracingSpan) if err != nil { return nil, err } opts.trustDependencies = proj.TrustResourceDependencies() // Now create the state source. This may issue an error if it can't create the source. This entails, // for example, loading any plugins which will be required to execute a program, among other things. 
source, err := opts.SourceFunc(ctx.BackendClient, opts, proj, pwd, main, target, plugctx, dryRun) if err != nil { contract.IgnoreClose(plugctx) return nil, err } localPolicyPackPaths := ConvertLocalPolicyPacksToPaths(opts.LocalPolicyPacks) var depl *deploy.Deployment if !opts.isImport { depl, err = deploy.NewDeployment( plugctx, target, target.Snapshot, source, localPolicyPackPaths, dryRun, ctx.BackendClient) } else { _, defaultProviderVersions, pluginErr := installPlugins(proj, pwd, main, target, plugctx, false /*returnInstallErrors*/) if pluginErr != nil { return nil, pluginErr } for i := range opts.imports { imp := &opts.imports[i] if imp.Provider == "" && imp.Version == nil { imp.Version = defaultProviderVersions[imp.Type.Package()] } } depl, err = deploy.NewImportDeployment(plugctx, target, proj.Name, opts.imports, dryRun) } if err != nil { contract.IgnoreClose(plugctx) return nil, err } return &deployment{ Ctx: info, Plugctx: plugctx, Deployment: depl, Options: opts, }, nil } type deployment struct { Ctx *deploymentContext // deployment context information. Plugctx *plugin.Context // the context containing plugins and their state. Deployment *deploy.Deployment // the deployment created by this command. Options deploymentOptions // the options used while deploying. } type runActions interface { deploy.Events Changes() ResourceChanges MaybeCorrupt() bool } // run executes the deployment. It is primarily responsible for handling cancellation. func (deployment *deployment) run(cancelCtx *Context, actions runActions, policyPacks map[string]string, preview bool) (ResourceChanges, result.Result) { // Change into the plugin context's working directory. chdir, err := fsutil.Chdir(deployment.Plugctx.Pwd) if err != nil { return nil, result.FromError(err) } defer chdir() // Create a new context for cancellation and tracing. ctx, cancelFunc := context.WithCancel(context.Background()) // Inject our opentracing span into the context. if deployment.Ctx.TracingSpan != nil { ctx = opentracing.ContextWithSpan(ctx, deployment.Ctx.TracingSpan) } // Emit an appropriate prelude event. deployment.Options.Events.preludeEvent(preview, deployment.Ctx.Update.GetTarget().Config) // Execute the deployment. start := time.Now() done := make(chan bool) var walkResult result.Result go func() { opts := deploy.Options{ Events: actions, Parallel: deployment.Options.Parallel, Refresh: deployment.Options.Refresh, RefreshOnly: deployment.Options.isRefresh, RefreshTargets: deployment.Options.RefreshTargets, ReplaceTargets: deployment.Options.ReplaceTargets, DestroyTargets: deployment.Options.DestroyTargets, UpdateTargets: deployment.Options.UpdateTargets, TargetDependents: deployment.Options.TargetDependents, TrustDependencies: deployment.Options.trustDependencies, UseLegacyDiff: deployment.Options.UseLegacyDiff, } walkResult = deployment.Deployment.Execute(ctx, opts, preview) close(done) }() // Asynchronously listen for cancellation, and deliver that signal to the deployment. go func() { select { case <-cancelCtx.Cancel.Canceled(): // Cancel the deployment's execution context, so it begins to shut down. cancelFunc() case <-done: return } }() // Wait for the deployment to finish executing or for the user to terminate the run. var res result.Result select { case <-cancelCtx.Cancel.Terminated(): res = result.WrapIfNonNil(cancelCtx.Cancel.TerminateErr()) case <-done: res = walkResult } duration := time.Since(start) changes := actions.Changes() // Emit a summary event. 
deployment.Options.Events.summaryEvent(preview, actions.MaybeCorrupt(), duration, changes, policyPacks) return changes, res
func (deployment *deployment) Close() error { return deployment.Plugctx.Close() } func assertSeen(seen map[resource.URN]deploy.Step, step deploy.Step) { _, has := seen[step.URN()] contract.Assertf(has, "URN '%v' had not been marked as seen", step.URN()) } func isDefaultProviderStep(step deploy.Step) bool { return providers.IsDefaultProvider(step.URN()) }
}
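The run method above follows a common Go cancellation shape: the deployment executes in one goroutine that closes a done channel, while a watcher goroutine selects between a cancel signal and completion and propagates cancellation through a context. A simplified, runnable sketch of that shape; the names and channel types are illustrative, not Pulumi's actual API.

package main

import (
	"context"
	"fmt"
	"time"
)

// execute stands in for Deployment.Execute: it must honor ctx cancellation.
func execute(ctx context.Context) error {
	select {
	case <-time.After(2 * time.Second):
		return nil // the work finished normally
	case <-ctx.Done():
		return ctx.Err() // the work was cancelled
	}
}

func run(cancelSignal <-chan struct{}) error {
	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()

	done := make(chan bool)
	var walkErr error
	go func() {
		walkErr = execute(ctx) // the deployment proper
		close(done)            // signals completion; also publishes walkErr
	}()

	// Asynchronously listen for cancellation and deliver it to the work.
	go func() {
		select {
		case <-cancelSignal:
			cancelFunc() // begin shutting the execution context down
		case <-done:
		}
	}()

	<-done // wait for the work to finish, normally or via cancellation
	return walkErr
}

func main() {
	cancelSignal := make(chan struct{})
	go func() { time.Sleep(500 * time.Millisecond); close(cancelSignal) }()
	fmt.Println("result:", run(cancelSignal))
}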
index.js
import request, { api } from '@/utils/request'; export function getCompaniesUser(params) { return request({ url: '/users/', method: 'get', params }); } export function putCompanyUserInfo(data) { return request({ url: '/users/' + data.id + '/', method: 'put', data }); } export function postCompanyUser(data) { return request({ url: '/users/', method: 'post', data }); } // delete is missing export function deleteCompanyUser(data) { return request({ url: '/users/' + data.id + '/', method: 'delete' }); } // not usable export function putUserPwd(data) { return request({ url: `/users/${data.id}/passwords/`, method: 'put', data }); } export function getCategories(params) { return request({ url: '/categories/', method: 'get', params }); } export function postCategories(data) { return request({ url: '/categories/', method: 'post', data }); } export function putCateById(data) { return request({ url: `/categories/${data.id}/`, method: 'put', data }); } export function getNewsListByCateId(id, page) { return request({ url: `/categories/${id}/news/`, method: 'get', params: { page } }); } export function getVideoToken() { return api.instance({ url: `/qiniu/tokens/`, method: 'get' }); } export function postCategoriesNews(cateId, data) { return request({ url: `/categories/${cateId}/news/`, method: 'post', data }); } export function deleteNews(cateId, newId) { return request({ url: `/categories/${cateId}/news/${newId}/`, method: 'delete' }); } export function deleteCate(cateId) { return request({ url: `/categories/${cateId}/`, method: 'delete' }); } export function modifyNews(cateId, data) { return request({ url: `/categories/${cateId}/news/${data.id}/`, method: 'put', data }); } export function getActivities({ page }) { return request({ url: '/activities/', method: 'get', params: { page } }); } export function postActivities(data) { return request({ url: '/activities/', method: 'post', data }); } export function putActivities(data) { return request({ url: `/activities/${data.id}/`, method: 'put', data }); } export function deleteActivities(id) { return request({ url: `/activities/${id}/`, method: 'delete' }); } export function getQuestions(actId, page) { return request({ url: `/activities/${actId}/questions/`, method: 'get', params: { page } }); } export function postQuestion(actId, data) { return request({ url: `/activities/${actId}/questions/`, method: 'post', data }); } export function putQuestion(actId, data) { return request({ url: `/activities/${actId}/questions/${data.id}/`, method: 'put', data }); } export function deleteQuestion(actId, id) { return request({ url: `/activities/${actId}/questions/${id}/`, method: 'delete' }); } export function getAds(page) { return request({ url: `/content_categories/`, method: 'get', params: { page } }); } export function postAd(data) {
urn request({ url: `/content_categories/`, method: 'post', data }); } export function deleteAd(id) { return request({ url: `/content_categories/${id}/`, method: 'delete' }); } export function putAd(data) { return request({ url: `/content_categories/${data.id}/`, method: 'put', data }); } export function getCateAds(id, page) { return request({ url: `/categories/${id}/contents/`, method: 'get', params: { page } }); } export function postCateAds(id, data) { return request({ url: `/categories/${id}/contents/`, method: 'post', data }); } export function deleteCateAds(id, cid) { return request({ url: `/categories/${id}/contents/${cid}/`, method: 'delete' }); } export function putCateAds(id, data) { return request({ url: `/categories/${id}/contents/${data.id}/`, method: 'put', data }); } export function getGoods(page) { return request({ url: `/goods/`, method: 'get', params: { page } }); } export function postGoods(data) { return request({ url: `/goods/`, method: 'post', data }); } export function putGoods(data) { return request({ url: `/goods/${data.id}/`, method: 'put', data }); } export function deleteGoods(id) { return request({ url: `/goods/${id}/`, method: 'delete' }); } export function getGoodsImg(id, page) { return request({ url: `/goods/${id}/images/`, method: 'get', params: { page } }); } export function deleteGoodsImg(id, imgId) { return request({ url: `/goods/${id}/images/${imgId}/`, method: 'delete' }); } export function postGoodsImg(id, data) { return request({ url: `/goods/${id}/images/`, method: 'post', data }); } export function getOrders(params) { return request({ url: `/orders/`, method: 'get', params }); } export function putOrders(data) { return request({ url: `/orders/${data.id}/`, method: 'put', data }); } export function postLives(data) { return request({ url: `/lives/`, method: 'post', data }); } export function getLives(params) { return request({ url: `/lives/`, method: 'get', params }); } export function putLives(data) { return request({ url: `/lives/${data.id}/`, method: 'put', data }); } export function deleteLives(id) { return request({ url: `/lives/${id}/`, method: 'delete' }); }
ret
index.tsx
import React from 'react' import { AppState } from 'store' import { connect } from 'react-redux' import styled from 'styled-components' import Task from 'components/Common/Task' import { ITaskState } from 'store/tasks/types' import { dragAndDrop } from 'store/tasks/actions' import { getKanbanOption } from 'store/show/selectors' import Button from 'components/Common/TaskWrapper/Button' import IconOval from 'components/Common/Icons/Common/Oval' const variables = { color: '#0062ff', colorBorder: '#e2e2ea', crossSize: 16 } const Wrapper = styled.div` width: ${(props: ITaskWrapperProps) => (props.option ? '280px' : 'auto')}; ` const Header = styled.div` border-radius: 15px 15px 0 0; border-top: 1px solid ${variables.colorBorder}; border-left: 1px solid ${variables.colorBorder}; border-right: 1px solid ${variables.colorBorder}; display: flex; justify-content: space-between; ` const Title = styled.span` font-size: 16px; letter-spacing: 0.1px; color: #696974; padding: 15px 20px; ` const More = styled.div` padding: 0 20px; display: flex; align-items: center; cursor: not-allowed; @media (max-width: 450px) { display: none; } ` const TasksWrapper = styled.div<DragWrapperProps>`
padding: 20px 0; background: ${props => props.dragOver ? `repeating-linear-gradient( 45deg, white, white 5px, #E3ECFB 5px, #E3ECFB 10px )` : 'none'}; ` type DragWrapperProps = { dragOver: boolean } interface ITaskWrapperProps { dragAndDrop: typeof dragAndDrop data: ITaskState[] type: string option: boolean } const Tasks: Function = (props: ITaskWrapperProps): JSX.Element[] => { return props.data.map((item: ITaskState) => ( <Task data={item} key={item.id} /> )) } const TaskWrapper: React.FC<ITaskWrapperProps> = props => { const { type, dragAndDrop } = props const [dragOver, setDragOver] = React.useState<boolean>(false) const onDragOver = (e: React.DragEvent<HTMLDivElement>): void => { e.preventDefault() } // set the flag explicitly rather than toggling, so repeated enter/leave events cannot drift out of sync const onDragEnter = (): void => { setDragOver(true) } const onDragLeave = (): void => { setDragOver(false) } const onDrop = (e: React.DragEvent<HTMLDivElement>): void => { dragAndDrop(e, type) setDragOver(false) } return ( <Wrapper onDrop={onDrop} onDragOver={onDragOver} onDragEnter={onDragEnter} onDragLeave={onDragLeave} {...props} > <Header> <Title>{type}</Title> <More> <IconOval /> </More> </Header> <TasksWrapper dragOver={dragOver}> <Tasks {...props} /> </TasksWrapper> <Button /> </Wrapper> ) } const mapStateToProps = (state: AppState) => { return { option: getKanbanOption(state) } } const mapDispatchToProps = { dragAndDrop } export default connect( mapStateToProps, mapDispatchToProps )(TaskWrapper)
height: auto; border-left: 1px solid ${variables.colorBorder}; border-right: 1px solid ${variables.colorBorder};
radam.py
# Copyright [yyyy] [name of copyright owner] # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """RAdam Optimizer. Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265 """ import math import torch from torch.optim.optimizer import Optimizer, required class RAdam(Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
def __setstate__(self, state): super(RAdam, self).__setstate__(state) def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data.float() if grad.is_sparse: raise RuntimeError('RAdam does not support sparse gradients') p_data_fp32 = p.data.float() state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_data_fp32) state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) else: state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) exp_avg.mul_(beta1).add_(1 - beta1, grad) state['step'] += 1 buffered = self.buffer[int(state['step'] % 10)] if state['step'] == buffered[0]: N_sma, step_size = buffered[1], buffered[2] else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] N_sma_max = 2 / (1 - beta2) - 1 N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = N_sma # more conservative since it's an approximated value if N_sma >= 5: step_size = group['lr'] * math.sqrt( (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / ( N_sma_max - 2)) / (1 - beta1 ** state['step']) else: step_size = group['lr'] / (1 - beta1 ** state['step']) buffered[2] = step_size if group['weight_decay'] != 0: p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) # more conservative since it's an approximated value if N_sma >= 5: denom = exp_avg_sq.sqrt().add_(group['eps']) p_data_fp32.addcdiv_(-step_size, exp_avg, denom) else: p_data_fp32.add_(-step_size, exp_avg) p.data.copy_(p_data_fp32) return loss class PlainRAdam(Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) super(PlainRAdam, self).__init__(params, defaults) def __setstate__(self, state): super(PlainRAdam, self).__setstate__(state) def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data.float() if grad.is_sparse: raise RuntimeError('RAdam does not support sparse gradients') p_data_fp32 = p.data.float() state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_data_fp32) state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) else: state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) exp_avg.mul_(beta1).add_(1 - beta1, grad) state['step'] += 1 beta2_t = beta2 ** state['step'] N_sma_max = 2 / (1 - beta2) - 1 N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) if group['weight_decay'] != 0: p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) # more conservative since it's an approximated value if N_sma >= 5: step_size = group['lr'] * math.sqrt( (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / ( N_sma_max - 2)) / (1 - beta1 ** state['step']) denom = exp_avg_sq.sqrt().add_(group['eps']) p_data_fp32.addcdiv_(-step_size, exp_avg, denom) else: step_size = group['lr'] / (1 - beta1 ** 
state['step']) p_data_fp32.add_(-step_size, exp_avg) p.data.copy_(p_data_fp32) return loss
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) self.buffer = [[None, None, None] for ind in range(10)] super(RAdam, self).__init__(params, defaults)
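For context, a minimal usage sketch of the optimizer defined above. This assumes the RAdam class above is in scope; the model, loss, and batch names are hypothetical placeholders, and the constructor arguments mirror the defaults shown in this file.

# Minimal usage sketch (assumption: the RAdam class above is importable in this scope;
# model/criterion/batch below are hypothetical placeholders).
import torch
import torch.nn as nn

model = nn.Linear(10, 1)
criterion = nn.MSELoss()
optimizer = RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=0)

for _ in range(100):
    x = torch.randn(32, 10)   # hypothetical batch
    y = torch.randn(32, 1)
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()          # runs the rectified-Adam update defined above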
label.go
// Copyright 2016 The Gogs Authors. All rights reserved. // Copyright 2018 The Gitea Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package repo import ( "fmt" "net/http" "strconv" "strings" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/convert" api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/web" "code.gitea.io/gitea/routers/api/v1/utils" ) // ListLabels list all the labels of a repository func ListLabels(ctx *context.APIContext) { // swagger:operation GET /repos/{owner}/{repo}/labels issue issueListLabels // --- // summary: Get all of a repository's labels // produces: // - application/json // parameters: // - name: owner // in: path // description: owner of the repo // type: string // required: true // - name: repo // in: path // description: name of the repo // type: string // required: true // - name: page // in: query // description: page number of results to return (1-based) // type: integer // - name: limit // in: query // description: page size of results // type: integer // responses: // "200": // "$ref": "#/responses/LabelList" labels, err := models.GetLabelsByRepoID(ctx.Repo.Repository.ID, ctx.Query("sort"), utils.GetListOptions(ctx)) if err != nil { ctx.Error(http.StatusInternalServerError, "GetLabelsByRepoID", err) return } ctx.JSON(http.StatusOK, convert.ToLabelList(labels)) } // GetLabel get label by repository and label id func GetLabel(ctx *context.APIContext) { // swagger:operation GET /repos/{owner}/{repo}/labels/{id} issue issueGetLabel // --- // summary: Get a single label // produces: // - application/json // parameters: // - name: owner // in: path // description: owner of the repo // type: string // required: true // - name: repo // in: path // description: name of the repo // type: string // required: true // - name: id // in: path // description: id of the label to get // type: integer // format: int64 // required: true // responses: // "200": // "$ref": "#/responses/Label" var ( label *models.Label err error ) strID := ctx.Params(":id") if intID, err2 := strconv.ParseInt(strID, 10, 64); err2 != nil { label, err = models.GetLabelInRepoByName(ctx.Repo.Repository.ID, strID) } else { label, err = models.GetLabelInRepoByID(ctx.Repo.Repository.ID, intID) } if err != nil { if models.IsErrRepoLabelNotExist(err) { ctx.NotFound() } else { ctx.Error(http.StatusInternalServerError, "GetLabelByRepoID", err) } return } ctx.JSON(http.StatusOK, convert.ToLabel(label)) } // CreateLabel create a label for a repository func CreateLabel(ctx *context.APIContext) { // swagger:operation POST /repos/{owner}/{repo}/labels issue issueCreateLabel // --- // summary: Create a label // consumes: // - application/json // produces: // - application/json // parameters: // - name: owner // in: path // description: owner of the repo // type: string // required: true // - name: repo // in: path // description: name of the repo // type: string // required: true // - name: body // in: body // schema: // "$ref": "#/definitions/CreateLabelOption" // responses: // "201": // "$ref": "#/responses/Label" // "422": // "$ref": "#/responses/validationError" form := web.GetForm(ctx).(*api.CreateLabelOption) form.Color = strings.Trim(form.Color, " ") if len(form.Color) == 6 { form.Color = "#" + form.Color } if !models.LabelColorPattern.MatchString(form.Color) { ctx.Error(http.StatusUnprocessableEntity, "ColorPattern", fmt.Errorf("bad color code: %s", 
form.Color)) return } label := &models.Label{ Name: form.Name, Color: form.Color, RepoID: ctx.Repo.Repository.ID, Description: form.Description, } if err := models.NewLabel(label); err != nil { ctx.Error(http.StatusInternalServerError, "NewLabel", err) return } ctx.JSON(http.StatusCreated, convert.ToLabel(label)) } // EditLabel modify a label for a repository func EditLabel(ctx *context.APIContext) { // swagger:operation PATCH /repos/{owner}/{repo}/labels/{id} issue issueEditLabel // --- // summary: Update a label // consumes: // - application/json // produces: // - application/json // parameters: // - name: owner // in: path // description: owner of the repo // type: string // required: true // - name: repo // in: path // description: name of the repo // type: string // required: true // - name: id // in: path // description: id of the label to edit // type: integer // format: int64 // required: true // - name: body // in: body // schema: // "$ref": "#/definitions/EditLabelOption" // responses: // "200": // "$ref": "#/responses/Label" // "422": // "$ref": "#/responses/validationError" form := web.GetForm(ctx).(*api.EditLabelOption) label, err := models.GetLabelInRepoByID(ctx.Repo.Repository.ID, ctx.ParamsInt64(":id")) if err != nil { if models.IsErrRepoLabelNotExist(err) { ctx.NotFound() } else { ctx.Error(http.StatusInternalServerError, "GetLabelByRepoID", err) } return } if form.Name != nil { label.Name = *form.Name } if form.Color != nil { label.Color = strings.Trim(*form.Color, " ") if len(label.Color) == 6 { label.Color = "#" + label.Color } if !models.LabelColorPattern.MatchString(label.Color) { ctx.Error(http.StatusUnprocessableEntity, "ColorPattern", fmt.Errorf("bad color code: %s", label.Color)) return } } if form.Description != nil {
label.Description = *form.Description } if err := models.UpdateLabel(label); err != nil { ctx.Error(http.StatusInternalServerError, "UpdateLabel", err) return } ctx.JSON(http.StatusOK, convert.ToLabel(label)) } // DeleteLabel delete a label for a repository func DeleteLabel(ctx *context.APIContext) { // swagger:operation DELETE /repos/{owner}/{repo}/labels/{id} issue issueDeleteLabel // --- // summary: Delete a label // parameters: // - name: owner // in: path // description: owner of the repo // type: string // required: true // - name: repo // in: path // description: name of the repo // type: string // required: true // - name: id // in: path // description: id of the label to delete // type: integer // format: int64 // required: true // responses: // "204": // "$ref": "#/responses/empty" if err := models.DeleteLabel(ctx.Repo.Repository.ID, ctx.ParamsInt64(":id")); err != nil { ctx.Error(http.StatusInternalServerError, "DeleteLabel", err) return } ctx.Status(http.StatusNoContent) }
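The handlers above implement the swagger routes documented in their comments. As an illustration, a small Python client sketch against those routes; the base URL, owner/repo, and token are placeholders (not values from this file), and the `token` auth header is an assumption about the deployment.

# Hypothetical client for the label endpoints above, using the documented routes
# POST /repos/{owner}/{repo}/labels and GET /repos/{owner}/{repo}/labels.
# BASE, repo, and TOKEN are placeholders, not values from this file.
import requests

BASE = "https://gitea.example.com/api/v1"   # assumed API root
repo = "owner/repo"
headers = {"Authorization": "token TOKEN"}  # assumed auth scheme

# Create a label; a 6-digit hex color without '#' is accepted
# (CreateLabel above prepends the '#', and replies 422 on a bad color code).
r = requests.post(f"{BASE}/repos/{repo}/labels",
                  json={"name": "bug", "color": "ee0701", "description": "Something broken"},
                  headers=headers)
r.raise_for_status()                        # 201 Created on success

# List labels, paginated via the documented page/limit query parameters.
labels = requests.get(f"{BASE}/repos/{repo}/labels",
                      params={"page": 1, "limit": 50}, headers=headers).json()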
karma.conf.js
// Karma configuration // Generated on Sun Jun 30 2013 00:14:30 GMT-0700 (PDT) // base path, that will be used to resolve files and exclude basePath = ''; preprocessors = { 'src/**/*.js': 'coverage' }; // list of files / patterns to load in the browser files = [ 'bower_components/jquery/jquery.js', 'src/common/utils.js', 'src/bloodhound/version.js', 'src/bloodhound/tokenizers.js', 'src/bloodhound/lru_cache.js', 'src/bloodhound/persistent_storage.js', 'src/bloodhound/transport.js', 'src/bloodhound/search_index.js', 'src/bloodhound/options_parser.js', 'src/bloodhound/bloodhound.js', 'src/typeahead/html.js', 'src/typeahead/css.js', 'src/typeahead/event_bus.js', 'src/typeahead/event_emitter.js', 'src/typeahead/highlight.js', 'src/typeahead/input.js', 'src/typeahead/dataset.js', 'src/typeahead/dropdown.js', 'src/typeahead/typeahead.js', 'src/typeahead/plugin.js', JASMINE, JASMINE_ADAPTER, 'test/fixtures/**/*', 'bower_components/jasmine-jquery/lib/jasmine-jquery.js', 'bower_components/jasmine-ajax/lib/mock-ajax.js', 'test/helpers/**/*', 'test/*_spec.js' ]; // list of files to exclude exclude = []; // test results reporter to use // possible values: 'dots', 'progress', 'junit' reporters = ['progress', 'coverage']; // web server port port = 9876; // cli runner port runnerPort = 9100; // code coverage configs coverageReporter = { type: 'html', dir: 'test/coverage/' }
colors = true; // level of logging // possible values: LOG_DISABLE || LOG_ERROR || LOG_WARN || LOG_INFO || LOG_DEBUG logLevel = LOG_INFO; // enable / disable watching file and executing tests whenever any file changes autoWatch = true; // Start these browsers, currently available: // - Chrome // - ChromeCanary // - Firefox // - Opera // - Safari (only Mac) // - PhantomJS // - IE (only Windows) browsers = ['Chrome']; // If browser does not capture in given timeout [ms], kill it captureTimeout = 60000; // Continuous Integration mode // if true, it capture browsers, run tests and exit singleRun = false;
// enable / disable colors in the output (reporters and logs)
challenge.js
const XMLHttpRequest = require('xmlhttprequest').XMLHttpRequest

const API = 'https://rickandmortyapi.com/api/character/'

function
(url_api, callback) {
    // Minimal XHR wrapper: GET the URL and hand the parsed JSON to the callback.
    const xhttp = new XMLHttpRequest()

    xhttp.open('GET', url_api, true)
    xhttp.onreadystatechange = function (event) {
        if (xhttp.readyState === 4) {
            if (xhttp.status === 200) {
                callback(null, JSON.parse(xhttp.responseText))
            } else {
                const error = new Error('Error ' + url_api)
                return callback(error, null)
            }
        }
    }
    xhttp.send()
}

// Three nested (sequential) calls: all characters -> first character -> its origin.
fetchData(API, function (error1, data1) {
    if (error1) {
        return console.error(error1)
    }
    console.log('First call completed')
    fetchData(API + data1.results[0].id, function (error2, data2) {
        if (error2) {
            return console.error(error2)
        }
        console.log('Second call completed')
        fetchData(data2.origin.url, function (error3, data3) {
            if (error3) {
                return console.error(error3)
            }
            console.log('Third call completed')
            console.log(data1.info.count)
            console.log(data2.name)
            console.log(data3.dimension)
        })
    })
})
fetchData
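The same three-step flow, written as straight-line Python for comparison. Only the public Rick and Morty API URL and the response fields come from the file above; the use of the requests library is an assumption.

# Same sequence as the nested callbacks above, expressed as sequential code.
import requests

API = "https://rickandmortyapi.com/api/character/"

all_chars = requests.get(API).json()                                    # first call
first = requests.get(API + str(all_chars["results"][0]["id"])).json()  # second call
origin = requests.get(first["origin"]["url"]).json()                   # third call

print(all_chars["info"]["count"])
print(first["name"])
print(origin["dimension"])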
Controller.ts
import { merge } from 'rxjs';
import { delay, distinctUntilChanged, filter, map, take } from 'rxjs/operators';

import {
  defaultValue,
  PeerJS,
  PeerJsUtil,
  R,
  rx,
  slug,
  StreamUtil,
  StringUtil,
  t,
  time,
  WebRuntime,
  DEFAULT,
} from './common';
import { PeerEvents } from './event';
import { MemoryRefs, SelfRef } from './Refs';
import { Status } from './Status';

type ConnectionKind = t.PeerNetworkConnectRes['kind'];

/**
 * EventBus controller for a WebRTC [Peer] connection.
 */
export function
(args: { bus: t.EventBus<any> }) { const bus = args.bus as t.EventBus<t.PeerEvent>; const events = PeerEvents(bus); const $ = events.$; const refs = MemoryRefs(); const dispose = () => { events.dispose(); refs.dispose(); window.removeEventListener('online', handleOnlineStatusChanged); window.removeEventListener('offline', handleOnlineStatusChanged); }; /** * Monitor network connectivity. */ const handleOnlineStatusChanged = (e: Event) => { Object.keys(refs.self).forEach((ref) => { bus.fire({ type: 'sys.net/peer/local/online:changed', payload: { self: ref, isOnline: navigator.onLine }, }); }); }; window.addEventListener('online', handleOnlineStatusChanged); window.addEventListener('offline', handleOnlineStatusChanged); /** * Initialize a new PeerJS data-connection. */ const completeConnection = ( kind: ConnectionKind, direction: t.PeerNetworkConnectRes['direction'], self: SelfRef, conn: PeerJS.DataConnection | PeerJS.MediaConnection, tx?: string, ) => { const connRef = refs.connection(self).get(conn); bus.fire({ type: 'sys.net/peer/conn/connect:res', payload: { self: self.id, tx: tx || slug(), kind, direction, existing: false, remote: connRef.peer.remote.id, connection: Status.toConnection(connRef), }, }); conn.on('close', async () => { /** * NOTE: * The close event is not being fired for [Media] connections. * Issue: https://github.com/peers/peerjs/issues/780 * * See work-around that uses the [netbus] "connection.ensureClosed" strategy. */ const peer = connRef.peer; events.connection(peer.self, peer.remote.id).close(connRef.id); }); if (kind === 'data') { const data = conn as PeerJS.DataConnection; data.on('data', (data: any) => { if (typeof data === 'object') { const source = { peer: connRef.peer.remote.id, connection: connRef.id }; bus.fire({ type: 'sys.net/peer/data/in', payload: { self: self.id, data, source }, }); } }); } return connRef; }; const initLocalPeer = (e: t.PeerLocalCreateReq) => { const createdAt = time.now.timestamp; const signal = StringUtil.parseEndpointAddress({ address: e.signal, key: DEFAULT.PEERJS_KEY }); const { host, path, port, secure, key } = signal; const peer = new PeerJS(e.self, { host, path, port, secure, key }); const self: SelfRef = { id: e.self, peer, createdAt, signal, connections: [], media: {} }; /** * Listen for incoming DATA connection requests. */ peer.on('connection', (dataConnection) => { dataConnection.on('open', () => { refs.connection(self).add('data', 'incoming', dataConnection); completeConnection('data', 'incoming', self, dataConnection); }); }); /** * Listen for incoming MEDIA connection requests (video/screen). */ peer.on('call', async (mediaConnection) => { const metadata = (mediaConnection.metadata || {}) as t.PeerConnectionMetadataMedia; const { kind, constraints } = metadata; const answer = (localStream?: MediaStream) => { mediaConnection.answer(localStream); mediaConnection.on('stream', (remoteStream) => { refs.connection(self).add(kind, 'incoming', mediaConnection, remoteStream); completeConnection(kind, 'incoming', self, mediaConnection); }); }; if (kind === 'media/video') { const local = await events.media(self.id).request({ kind, constraints }); answer(local.media); } if (kind === 'media/screen') { // NB: Screen shares do not send back another stream so do // not request it from the user. answer(); } }); // Finish up. return self; }; /** * CREATE a new network client. 
*/ rx.payload<t.PeerLocalInitReqEvent>($, 'sys.net/peer/local/init:req') .pipe(delay(0)) .subscribe((e) => { const id = e.self; if (!refs.self[id]) refs.self[id] = initLocalPeer(e); const self = refs.self[id]; bus.fire({ type: 'sys.net/peer/local/init:res', payload: { self: e.self, createdAt: self.createdAt, signal: self.signal }, }); }); /** * STATUS */ rx.payload<t.PeerLocalStatusReqEvent>($, 'sys.net/peer/local/status:req') .pipe(delay(0)) .subscribe((e) => { const tx = e.tx || slug(); const self = refs.self[e.self]; const peer = self ? Status.toSelf(self) : undefined; const exists = Boolean(peer); bus.fire({ type: 'sys.net/peer/local/status:res', payload: { self: e.self, tx, exists, peer }, }); }); /** * STATUS CHANGE */ const statusChanged$ = merge( $.pipe( filter((e) => { const types: t.PeerEvent['type'][] = [ 'sys.net/peer/local/init:res', 'sys.net/peer/local/purge:res', 'sys.net/peer/local/online:changed', 'sys.net/peer/conn/connect:res', 'sys.net/peer/conn/disconnect:res', ]; return types.includes(e.type); }), ), ).pipe( map((event) => ({ selfRef: refs.self[event.payload.self], event })), filter((e) => Boolean(e.selfRef)), map((e) => ({ event: e.event, status: Status.toSelf(e.selfRef) })), distinctUntilChanged((prev, next) => R.equals(prev.status, next.status)), ); statusChanged$.subscribe((e) => { bus.fire({ type: 'sys.net/peer/local/status:changed', payload: { self: e.status.id, peer: e.status, event: e.event }, }); }); rx.event<t.PeerLocalStatusRefreshEvent>($, 'sys.net/peer/local/status:refresh') .pipe() .subscribe((event) => { const { self } = event.payload; const selfRef = refs.self[self]; if (selfRef) { bus.fire({ type: 'sys.net/peer/local/status:changed', payload: { self, peer: Status.toSelf(selfRef), event }, }); } }); /** * PURGE */ rx.payload<t.PeerLocalPurgeReqEvent>($, 'sys.net/peer/local/purge:req') .pipe() .subscribe((e) => { const tx = e.tx || slug(); const self = refs.self[e.self]; const select = typeof e.select === 'object' ? 
e.select : { closedConnections: true }; let changed = false; const purged: t.PeerLocalPurged = { closedConnections: { data: 0, video: 0, screen: 0 }, }; const fire = (payload?: Partial<t.PeerLocalPurgeRes>) => { bus.fire({ type: 'sys.net/peer/local/purge:res', payload: { self: e.self, tx, changed, purged, ...payload }, }); }; const fireError = (message: string) => fire({ error: { message } }); if (!self) { const message = `The local PeerNetwork '${e.self}' does not exist`; return fireError(message); } if (select.closedConnections) { const closed = self.connections.filter((item) => !Status.toConnection(item).isOpen); self.connections = self.connections.filter( ({ peer: id }) => !closed.some((c) => c.peer === id), ); closed.forEach((item) => { changed = true; if (item.kind === 'data') purged.closedConnections.data++; if (item.kind === 'media/video') purged.closedConnections.video++; if (item.kind === 'media/screen') purged.closedConnections.screen++; }); } fire(); }); /** * CONNECT: Outgoing */ rx.payload<t.PeerConnectReqEvent>($, 'sys.net/peer/conn/connect:req') .pipe(filter((e) => e.direction === 'outgoing')) .subscribe(async (e) => { const { remote } = e; const self = refs.self[e.self]; const tx = e.tx || slug(); const module = { name: WebRuntime.module.name, version: WebRuntime.module.version }; const userAgent = navigator.userAgent; const parent = e.parent; const fire = (payload?: Partial<t.PeerNetworkConnectRes>) => { const existing = Boolean(payload?.existing); bus.fire({ type: 'sys.net/peer/conn/connect:res', payload: { kind: e.kind, self: e.self, tx, remote, direction: 'outgoing', existing, ...payload, }, }); }; const fireError = (message: string) => fire({ error: { message } }); if (!self) { const message = `The local PeerNetwork '${e.self}' does not exist`; return fireError(message); } if (self.id === remote) { const message = `Cannot connect to self`; return fireError(message); } /** * START a data connection. */ if (e.kind === 'data') { const metadata: t.PeerConnectionMetadataData = { kind: e.kind, module, userAgent, parent }; const reliable = e.isReliable; const errorMonitor = PeerJsUtil.error(self.peer); const dataConnection = self.peer.connect(remote, { reliable, metadata }); refs.connection(self).add('data', 'outgoing', dataConnection); dataConnection.on('open', () => { // SUCCESS: Connected to the remote peer. errorMonitor.dispose(); completeConnection('data', 'outgoing', self, dataConnection, tx); }); // Listen for a connection error. // Will happen on timeout (remote peer not found on the network) errorMonitor.$.pipe( filter((err) => err.type === 'peer-unavailable'), filter((err) => err.message.includes(`peer ${remote}`)), take(1), ).subscribe((err) => { // FAIL errorMonitor.dispose(); refs.connection(self).remove(dataConnection); fireError(`Failed to connect to peer '${remote}'. The remote target did not respond.`); }); } /** * START a media (video) call. */ if (e.kind === 'media/video' || e.kind === 'media/screen') { const { constraints } = e; // Retrieve the media stream. const res = await events.media(self.id).request({ kind: e.kind, constraints }); const localStream = res.media; if (res.error || !localStream) { const err = res.error?.message || `Failed to retrieve a local media stream (${self.id}).`; return fireError(err); } // Start the network/peer connection. 
const metadata: t.PeerConnectionMetadataMedia = { kind: e.kind, constraints, module, userAgent, parent, }; const mediaConnection = self.peer.call(remote, localStream, { metadata }); const connRef = refs.connection(self).add(e.kind, 'outgoing', mediaConnection); connRef.localStream = localStream; // Manage timeout. const msecs = defaultValue(e.timeout, 10 * 1000); const timeout = time.delay(msecs, () => { refs.connection(self).remove(mediaConnection); const err = `Failed to connect [${e.kind}] to peer '${remote}'. The connection attempt timed out.`; fireError(err); }); const completeMediaConnection = () => { timeout.cancel(); completeConnection(e.kind, 'outgoing', self, mediaConnection, tx); }; if (e.kind === 'media/video') { mediaConnection.on('stream', (remoteStream) => { if (timeout.isCancelled) return; connRef.remoteStream = remoteStream; completeMediaConnection(); }); } if (e.kind === 'media/screen') { // NB: Complete immediately without waiting for return stream. // Screen shares are always one-way (out) so there will be no incoming stream. const completeUponOpen = () => { if (!mediaConnection.open) return time.delay(50, completeUponOpen); return completeMediaConnection(); }; completeUponOpen(); } // Listen for external ending of the stream and clean up accordingly. StreamUtil.onEnded(localStream, () => { events.connection(self.id, remote).close(connRef.id); }); } }); /** * DISCONNECT from a remote peer. */ rx.payload<t.PeerDisconnectReqEvent>($, 'sys.net/peer/conn/disconnect:req') .pipe(filter((e) => Boolean(refs.self[e.self]))) .subscribe(async (e) => { const selfRef = refs.self[e.self]; const tx = e.tx || slug(); const fire = (payload?: Partial<t.PeerNetworkDisconnectRes>) => { const connection = e.connection; bus.fire({ type: 'sys.net/peer/conn/disconnect:res', payload: { self: e.self, tx, connection, ...payload }, }); }; const fireError = (message: string) => fire({ error: { message } }); if (!selfRef) { const message = `The local PeerNetwork '${e.self}' does not exist`; return fireError(message); } const connRef = selfRef.connections.find((item) => item.id === e.connection); if (!connRef) { const message = `The connection to close '${e.connection}' does not exist`; return fireError(message); } // Ensure all child connections are closed. const children = selfRef.connections.filter(({ parent }) => parent === e.connection); await Promise.all( children.map((child) => { const { self, remote } = child.peer; return events.connection(self, remote.id).close(child.id); }), ); // Close the connection. if (connRef.conn.open) connRef.conn.close(); fire({}); }); /** * DATA:OUT: Send */ rx.payload<t.PeerDataOutReqEvent>($, 'sys.net/peer/data/out:req') .pipe(filter((e) => Boolean(refs.self[e.self]))) .subscribe((e) => { const selfRef = refs.self[e.self]; const tx = e.tx || slug(); const targets = selfRef.connections .filter((ref) => ref.kind === 'data') .filter((ref) => (e.targets || []).some((uri) => uri === ref.uri)); // Send the data over the wire. targets.forEach((ref) => { (ref.conn as PeerJS.DataConnection).send(e.data); }); // Fire response event. 
bus.fire({ type: 'sys.net/peer/data/out:res', payload: { tx, self: e.self, sent: targets.map((ref) => ({ peer: ref.peer.remote.id, connection: ref.id })), data: e.data, }, }); }); /** * REMOTE: exists */ rx.payload<t.PeerRemoteExistsReqEvent>($, 'sys.net/peer/remote/exists:req') .pipe() .subscribe(async (e) => { const { tx, self, remote } = e; const connection = events.connection(self, remote); let exists = false; const res = await connection.open.data({ isReliable: false }); const id = res.connection?.id; if (!res.error) exists = true; if (id) connection.close(id); // Clean up. // Fire response event. bus.fire({ type: 'sys.net/peer/remote/exists:res', payload: { tx, self, remote, exists }, }); }); /** * API */ return { dispose$: events.dispose$.pipe(take(1)), dispose, }; }
Controller
data_point.py
# coding: utf-8 """ axxell-api No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pprint import pformat from six import iteritems import re class DataPoint(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, creation_time=None, label=None, value=None): """ DataPoint - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'creation_time': 'str', 'label': 'str', 'value': 'float' } self.attribute_map = { 'creation_time': 'creationTime', 'label': 'label', 'value': 'value' } self._creation_time = creation_time self._label = label self._value = value @property def creation_time(self): """ Gets the creation_time of this DataPoint. :return: The creation_time of this DataPoint. :rtype: str """ return self._creation_time @creation_time.setter def creation_time(self, creation_time): """ Sets the creation_time of this DataPoint. :param creation_time: The creation_time of this DataPoint. :type: str """ self._creation_time = creation_time @property def label(self): """ Gets the label of this DataPoint. :return: The label of this DataPoint. :rtype: str """ return self._label @label.setter def label(self, label): """ Sets the label of this DataPoint. :param label: The label of this DataPoint. :type: str """ self._label = label @property def value(self): """ Gets the value of this DataPoint. :return: The value of this DataPoint. :rtype: float """ return self._value @value.setter def value(self, value): """ Sets the value of this DataPoint. :param value: The value of this DataPoint. :type: float """ self._value = value def
(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
to_dict
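A short sketch of how the generated model round-trips to a plain dict; the values are illustrative only, and `to_dict()` walks `swagger_types` exactly as defined above.

# Illustrative values; to_dict() keys come from the swagger_types attribute names.
point = DataPoint(creation_time="2024-01-01T00:00:00Z", label="cpu", value=0.42)
assert point.to_dict() == {
    "creation_time": "2024-01-01T00:00:00Z",
    "label": "cpu",
    "value": 0.42,
}
print(point)   # __repr__ pretty-prints via pformat(to_dict())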
podtemplate.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file was automatically generated by informer-gen package v1 import ( core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" v1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" time "time" ) // PodTemplateInformer provides access to a shared informer and lister for // PodTemplates. type PodTemplateInformer interface { Informer() cache.SharedIndexInformer Lister() v1.PodTemplateLister } type podTemplateInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc namespace string } // NewPodTemplateInformer constructs a new informer for PodTemplate type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { return NewFilteredPodTemplateInformer(client, namespace, resyncPeriod, indexers, nil) } // NewFilteredPodTemplateInformer constructs a new informer for PodTemplate type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil {
} return client.CoreV1().PodTemplates(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } return client.CoreV1().PodTemplates(namespace).Watch(options) }, }, &core_v1.PodTemplate{}, resyncPeriod, indexers, ) } func (f *podTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { return NewFilteredPodTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podTemplateInformer) Informer() cache.SharedIndexInformer { return f.factory.InformerFor(&core_v1.PodTemplate{}, f.defaultInformer) } func (f *podTemplateInformer) Lister() v1.PodTemplateLister { return v1.NewPodTemplateLister(f.Informer().GetIndexer()) }
tweakListOptions(&options)
handler_test.go
package runtime_test import ( "context" "io" "io/ioutil" "net/http" "net/http/httptest" "testing" "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" pb "github.com/grpc-ecosystem/grpc-gateway/v2/runtime/internal/examplepb" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" ) type fakeReponseBodyWrapper struct { proto.Message } // XXX_ResponseBody returns id of SimpleMessage func (r fakeReponseBodyWrapper) XXX_ResponseBody() interface{} { resp := r.Message.(*pb.SimpleMessage) return resp.Id } func TestForwardResponseStream(t *testing.T) { type msg struct { pb proto.Message err error } tests := []struct { name string msgs []msg statusCode int responseBody bool }{{ name: "encoding", msgs: []msg{ {&pb.SimpleMessage{Id: "One"}, nil}, {&pb.SimpleMessage{Id: "Two"}, nil}, }, statusCode: http.StatusOK, }, { name: "empty", statusCode: http.StatusOK, }, { name: "error", msgs: []msg{{nil, status.Errorf(codes.OutOfRange, "400")}}, statusCode: http.StatusBadRequest, }, { name: "stream_error", msgs: []msg{ {&pb.SimpleMessage{Id: "One"}, nil}, {nil, status.Errorf(codes.OutOfRange, "400")}, }, statusCode: http.StatusOK, }, { name: "response body stream case", msgs: []msg{ {fakeReponseBodyWrapper{&pb.SimpleMessage{Id: "One"}}, nil}, {fakeReponseBodyWrapper{&pb.SimpleMessage{Id: "Two"}}, nil}, }, responseBody: true, statusCode: http.StatusOK, }, { name: "response body stream error case", msgs: []msg{ {fakeReponseBodyWrapper{&pb.SimpleMessage{Id: "One"}}, nil}, {nil, status.Errorf(codes.OutOfRange, "400")}, }, responseBody: true, statusCode: http.StatusOK, }} newTestRecv := func(t *testing.T, msgs []msg) func() (proto.Message, error) { var count int return func() (proto.Message, error) { if count == len(msgs) { return nil, io.EOF } else if count > len(msgs) { t.Errorf("recv() called %d times for %d messages", count, len(msgs)) } count++ msg := msgs[count-1] return msg.pb, msg.err } } ctx := runtime.NewServerMetadataContext(context.Background(), runtime.ServerMetadata{}) marshaler := &runtime.JSONPb{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { recv := newTestRecv(t, tt.msgs) req := httptest.NewRequest("GET", "http://example.com/foo", nil) resp := httptest.NewRecorder() runtime.ForwardResponseStream(ctx, runtime.NewServeMux(), marshaler, resp, req, recv) w := resp.Result() if w.StatusCode != tt.statusCode { t.Errorf("StatusCode %d want %d", w.StatusCode, tt.statusCode) } if h := w.Header.Get("Transfer-Encoding"); h != "chunked" { t.Errorf("ForwardResponseStream missing header chunked") } body, err := ioutil.ReadAll(w.Body) if err != nil { t.Errorf("Failed to read response body with %v", err) } w.Body.Close() var want []byte counter := 0 for i, msg := range tt.msgs { if msg.err != nil { if i == 0 { // Skip non-stream errors t.Skip("checking error encodings") } st := status.Convert(msg.err) b, err := marshaler.Marshal(map[string]proto.Message{ "error": st.Proto(), }) if err != nil { t.Errorf("marshaler.Marshal() failed %v", err) } errBytes := body[len(want):] if string(errBytes) != string(b) { t.Errorf("ForwardResponseStream() = \"%s\" want \"%s\"", errBytes, b) } return } var b []byte if tt.responseBody { // responseBody interface is in runtime package and test is in runtime_test package. hence can't use responseBody directly // So type casting to fakeReponseBodyWrapper struct to verify the data. 
rb, ok := msg.pb.(fakeReponseBodyWrapper) if !ok { t.Errorf("stream responseBody failed %v", err) } b, err = marshaler.Marshal(map[string]interface{}{"result": rb.XXX_ResponseBody(), "header_metadata": metadata.MD{}, "count": counter}) } else { b, err = marshaler.Marshal(map[string]interface{}{"result": msg.pb, "header_metadata": metadata.MD{}, "count": counter}) } counter++ if err != nil { t.Errorf("marshaler.Marshal() failed %v", err) } want = append(want, b...) want = append(want, marshaler.Delimiter()...) } if string(body) != string(want) { t.Errorf("ForwardResponseStream() = \"%s\" want \"%s\"", body, want) } }) } } // A custom marshaler implementation, that doesn't implement the delimited interface type CustomMarshaler struct { m *runtime.JSONPb } func (c *CustomMarshaler) Marshal(v interface{}) ([]byte, error) { return c.m.Marshal(v) } func (c *CustomMarshaler) Unmarshal(data []byte, v interface{}) error { return c.m.Unmarshal(data, v) } func (c *CustomMarshaler) NewDecoder(r io.Reader) runtime.Decoder { return c.m.NewDecoder(r) } func (c *CustomMarshaler) NewEncoder(w io.Writer) runtime.Encoder { return c.m.NewEncoder(w) } func (c *CustomMarshaler) ContentType(v interface{}) string { return "Custom-Content-Type" } func TestForwardResponseStreamCustomMarshaler(t *testing.T) { type msg struct { pb proto.Message err error } tests := []struct { name string msgs []msg statusCode int }{{ name: "encoding", msgs: []msg{ {&pb.SimpleMessage{Id: "One"}, nil}, {&pb.SimpleMessage{Id: "Two"}, nil}, }, statusCode: http.StatusOK, }, { name: "empty", statusCode: http.StatusOK, }, { name: "error", msgs: []msg{{nil, status.Errorf(codes.OutOfRange, "400")}}, statusCode: http.StatusBadRequest, }, { name: "stream_error", msgs: []msg{ {&pb.SimpleMessage{Id: "One"}, nil}, {nil, status.Errorf(codes.OutOfRange, "400")}, }, statusCode: http.StatusOK, }} newTestRecv := func(t *testing.T, msgs []msg) func() (proto.Message, error) { var count int return func() (proto.Message, error) { if count == len(msgs) { return nil, io.EOF } else if count > len(msgs) { t.Errorf("recv() called %d times for %d messages", count, len(msgs)) } count++ msg := msgs[count-1] return msg.pb, msg.err } }
recv := newTestRecv(t, tt.msgs)
			req := httptest.NewRequest("GET", "http://example.com/foo", nil)
			resp := httptest.NewRecorder()

			runtime.ForwardResponseStream(ctx, runtime.NewServeMux(), marshaler, resp, req, recv)

			w := resp.Result()
			if w.StatusCode != tt.statusCode {
				t.Errorf("StatusCode %d want %d", w.StatusCode, tt.statusCode)
			}
			if h := w.Header.Get("Transfer-Encoding"); h != "chunked" {
				t.Errorf("ForwardResponseStream missing header chunked")
			}
			body, err := ioutil.ReadAll(w.Body)
			if err != nil {
				t.Errorf("Failed to read response body with %v", err)
			}
			w.Body.Close()

			var want []byte
			counter := 0
			for _, msg := range tt.msgs {
				if msg.err != nil {
					t.Skip("checking error encodings")
				}
				b, err := marshaler.Marshal(map[string]interface{}{"result": msg.pb, "header_metadata": metadata.MD{}, "count": counter})
				if err != nil {
					t.Errorf("marshaler.Marshal() failed %v", err)
				}
				want = append(want, b...)
				want = append(want, "\n"...)
				counter++
			}

			if string(body) != string(want) {
				t.Errorf("ForwardResponseStream() = \"%s\" want \"%s\"", body, want)
			}
		})
	}
}

func TestForwardResponseMessage(t *testing.T) {
	msg := &pb.SimpleMessage{Id: "One"}

	tests := []struct {
		name        string
		marshaler   runtime.Marshaler
		contentType string
	}{{
		name:        "standard marshaler",
		marshaler:   &runtime.JSONPb{},
		contentType: "application/json",
	}, {
		name:        "httpbody marshaler",
		marshaler:   &runtime.HTTPBodyMarshaler{&runtime.JSONPb{}},
		contentType: "application/json",
	}, {
		name:        "custom marshaler",
		marshaler:   &CustomMarshaler{&runtime.JSONPb{}},
		contentType: "Custom-Content-Type",
	}}

	ctx := runtime.NewServerMetadataContext(context.Background(), runtime.ServerMetadata{})
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			req := httptest.NewRequest("GET", "http://example.com/foo", nil)
			resp := httptest.NewRecorder()

			runtime.ForwardResponseMessage(ctx, runtime.NewServeMux(), tt.marshaler, resp, req, msg)

			w := resp.Result()
			if w.StatusCode != http.StatusOK {
				t.Errorf("StatusCode %d want %d", w.StatusCode, http.StatusOK)
			}
			if h := w.Header.Get("Content-Type"); h != tt.contentType {
				t.Errorf("Content-Type %v want %v", h, tt.contentType)
			}
			body, err := ioutil.ReadAll(w.Body)
			if err != nil {
				t.Errorf("Failed to read response body with %v", err)
			}
			w.Body.Close()

			want, err := tt.marshaler.Marshal(msg)
			if err != nil {
				t.Errorf("marshaler.Marshal() failed %v", err)
			}

			if string(body) != string(want) {
				t.Errorf("ForwardResponseMessage() = \"%s\" want \"%s\"", body, want)
			}
		})
	}
}
ctx := runtime.NewServerMetadataContext(context.Background(), runtime.ServerMetadata{}) marshaler := &CustomMarshaler{&runtime.JSONPb{}} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) {
twoFactorLogin.ts
export default ` <p> Hi {{userName}}, </p> <p>
</p> <p> Thanks <br /> {{ siteName }} Team. </p> `;
Use <b>{{twoFactorCode}}</b> to login as {{userName}}.
worker.py
import logging import os import subprocess import sys import tempfile import time import yaml from datetime import datetime from teuthology import setup_log_file, install_except_hook from . import beanstalk from . import report from . import safepath from .config import config as teuth_config from .config import set_config_attr from .exceptions import BranchNotFoundError, SkipJob, MaxWhileTries from .kill import kill_job from .repo_utils import fetch_qa_suite, fetch_teuthology log = logging.getLogger(__name__) start_time = datetime.utcnow() restart_file_path = '/tmp/teuthology-restart-workers' stop_file_path = '/tmp/teuthology-stop-workers' def sentinel(path): if not os.path.exists(path): return False file_mtime = datetime.utcfromtimestamp(os.path.getmtime(path)) if file_mtime > start_time: return True else: return False def restart(): log.info('Restarting...') args = sys.argv[:] args.insert(0, sys.executable) os.execv(sys.executable, args) def stop(): log.info('Stopping...') sys.exit(0) def load_config(ctx=None): teuth_config.load() if ctx is not None: if not os.path.isdir(ctx.archive_dir): sys.exit("{prog}: archive directory must exist: {path}".format( prog=os.path.basename(sys.argv[0]), path=ctx.archive_dir, )) else: teuth_config.archive_base = ctx.archive_dir def main(ctx): loglevel = logging.INFO if ctx.verbose: loglevel = logging.DEBUG log.setLevel(loglevel) log_file_path = os.path.join(ctx.log_dir, 'worker.{tube}.{pid}'.format( pid=os.getpid(), tube=ctx.tube,)) setup_log_file(log_file_path) install_except_hook() load_config(ctx=ctx) set_config_attr(ctx) connection = beanstalk.connect() beanstalk.watch_tube(connection, ctx.tube) result_proc = None if teuth_config.teuthology_path is None: fetch_teuthology('master') fetch_qa_suite('master') keep_running = True while keep_running: # Check to see if we have a teuthology-results process hanging around # and if so, read its return code so that it can exit. if result_proc is not None and result_proc.poll() is not None: log.debug("teuthology-results exited with code: %s", result_proc.returncode) result_proc = None if sentinel(restart_file_path):
elif sentinel(stop_file_path): stop() load_config() job = connection.reserve(timeout=60) if job is None: continue # bury the job so it won't be re-run if it fails job.bury() job_id = job.jid log.info('Reserved job %d', job_id) log.info('Config is: %s', job.body) job_config = yaml.safe_load(job.body) job_config['job_id'] = str(job_id) if job_config.get('stop_worker'): keep_running = False try: job_config, teuth_bin_path = prep_job( job_config, log_file_path, ctx.archive_dir, ) run_job( job_config, teuth_bin_path, ctx.archive_dir, ctx.verbose, ) except SkipJob: continue # This try/except block is to keep the worker from dying when # beanstalkc throws a SocketError try: job.delete() except Exception: log.exception("Saw exception while trying to delete job") def prep_job(job_config, log_file_path, archive_dir): job_id = job_config['job_id'] safe_archive = safepath.munge(job_config['name']) job_config['worker_log'] = log_file_path archive_path_full = os.path.join( archive_dir, safe_archive, str(job_id)) job_config['archive_path'] = archive_path_full # If the teuthology branch was not specified, default to master and # store that value. teuthology_branch = job_config.get('teuthology_branch', 'master') job_config['teuthology_branch'] = teuthology_branch try: if teuth_config.teuthology_path is not None: teuth_path = teuth_config.teuthology_path else: teuth_path = fetch_teuthology(branch=teuthology_branch) # For the teuthology tasks, we look for suite_branch, and if we # don't get that, we look for branch, and fall back to 'master'. # last-in-suite jobs don't have suite_branch or branch set. ceph_branch = job_config.get('branch', 'master') suite_branch = job_config.get('suite_branch', ceph_branch) suite_repo = job_config.get('suite_repo') if suite_repo: teuth_config.ceph_qa_suite_git_url = suite_repo job_config['suite_path'] = os.path.normpath(os.path.join( fetch_qa_suite(suite_branch), job_config.get('suite_relpath', ''), )) except BranchNotFoundError as exc: log.exception("Branch not found; marking job as dead") report.try_push_job_info( job_config, dict(status='dead', failure_reason=str(exc)) ) raise SkipJob() except MaxWhileTries as exc: log.exception("Failed to fetch or bootstrap; marking job as dead") report.try_push_job_info( job_config, dict(status='dead', failure_reason=str(exc)) ) raise SkipJob() teuth_bin_path = os.path.join(teuth_path, 'virtualenv', 'bin') if not os.path.isdir(teuth_bin_path): raise RuntimeError("teuthology branch %s at %s not bootstrapped!" % (teuthology_branch, teuth_bin_path)) return job_config, teuth_bin_path def run_job(job_config, teuth_bin_path, archive_dir, verbose): safe_archive = safepath.munge(job_config['name']) if job_config.get('last_in_suite'): if teuth_config.results_server: report.try_delete_jobs(job_config['name'], job_config['job_id']) log.info('Generating results email for %s', job_config['name']) args = [ os.path.join(teuth_bin_path, 'teuthology-results'), '--timeout', str(job_config.get('results_timeout', teuth_config.results_timeout)), '--email', job_config['email'], '--archive-dir', os.path.join(archive_dir, safe_archive), '--name', job_config['name'], ] # Execute teuthology-results, passing 'preexec_fn=os.setpgrp' to # make sure that it will continue to run if this worker process # dies (e.g. 
because of a restart) result_proc = subprocess.Popen(args=args, preexec_fn=os.setpgrp) log.info("teuthology-results PID: %s", result_proc.pid) return log.info('Creating archive dir %s', job_config['archive_path']) safepath.makedirs('/', job_config['archive_path']) log.info('Running job %s', job_config['job_id']) suite_path = job_config['suite_path'] arg = [ os.path.join(teuth_bin_path, 'teuthology'), ] # The following is for compatibility with older schedulers, from before we # started merging the contents of job_config['config'] into job_config # itself. if 'config' in job_config: inner_config = job_config.pop('config') if not isinstance(inner_config, dict): log.warn("run_job: job_config['config'] isn't a dict, it's a %s", str(type(inner_config))) else: job_config.update(inner_config) if verbose or job_config['verbose']: arg.append('-v') arg.extend([ '--lock', '--block', '--owner', job_config['owner'], '--archive', job_config['archive_path'], '--name', job_config['name'], ]) if job_config['description'] is not None: arg.extend(['--description', job_config['description']]) arg.append('--') with tempfile.NamedTemporaryFile(prefix='teuthology-worker.', suffix='.tmp',) as tmp: yaml.safe_dump(data=job_config, stream=tmp) tmp.flush() arg.append(tmp.name) env = os.environ.copy() python_path = env.get('PYTHONPATH', '') python_path = ':'.join([suite_path, python_path]).strip(':') env['PYTHONPATH'] = python_path log.debug("Running: %s" % ' '.join(arg)) p = subprocess.Popen(args=arg, env=env) log.info("Job archive: %s", job_config['archive_path']) log.info("Job PID: %s", str(p.pid)) if teuth_config.results_server: log.info("Running with watchdog") try: run_with_watchdog(p, job_config) except Exception: log.exception("run_with_watchdog had an unhandled exception") raise else: log.info("Running without watchdog") # This sleep() is to give the child time to start up and create the # archive dir. time.sleep(5) symlink_worker_log(job_config['worker_log'], job_config['archive_path']) p.wait() if p.returncode != 0: log.error('Child exited with code %d', p.returncode) else: log.info('Success!') def run_with_watchdog(process, job_config): job_start_time = datetime.utcnow() # Only push the information that's relevant to the watchdog, to save db # load job_info = dict( name=job_config['name'], job_id=job_config['job_id'], ) # Sleep once outside of the loop to avoid double-posting jobs time.sleep(teuth_config.watchdog_interval) symlink_worker_log(job_config['worker_log'], job_config['archive_path']) while process.poll() is None: # Kill jobs that have been running longer than the global max run_time = datetime.utcnow() - job_start_time total_seconds = run_time.days * 60 * 60 * 24 + run_time.seconds if total_seconds > teuth_config.max_job_time: log.warning("Job ran longer than {max}s. Killing...".format( max=teuth_config.max_job_time)) kill_job(job_info['name'], job_info['job_id'], teuth_config.archive_base) # calling this without a status just updates the jobs updated time report.try_push_job_info(job_info) time.sleep(teuth_config.watchdog_interval) # The job finished. Let's make sure paddles knows. branches_sans_reporting = ('argonaut', 'bobtail', 'cuttlefish', 'dumpling') if job_config.get('teuthology_branch') in branches_sans_reporting: # The job ran with a teuthology branch that may not have the reporting # feature. Let's call teuthology-report (which will be from the master # branch) to report the job manually. 
cmd = "teuthology-report -v -D -r {run_name} -j {job_id}".format( run_name=job_info['name'], job_id=job_info['job_id']) try: log.info("Executing %s" % cmd) report_proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while report_proc.poll() is None: for line in report_proc.stdout.readlines(): log.info(line.strip()) time.sleep(1) log.info("Reported results via the teuthology-report command") except Exception: log.exception("teuthology-report failed") else: # Let's make sure that paddles knows the job is finished. We don't know # the status, but if it was a pass or fail it will have already been # reported to paddles. In that case paddles ignores the 'dead' status. # If the job was killed, paddles will use the 'dead' status. report.try_push_job_info(job_info, dict(status='dead')) def symlink_worker_log(worker_log_path, archive_dir): try: log.debug("Worker log: %s", worker_log_path) os.symlink(worker_log_path, os.path.join(archive_dir, 'worker.log')) except Exception: log.exception("Failed to symlink worker log")
restart()
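The worker loop above polls two sentinel files to decide between restart() and stop(). The following standalone sketch reproduces that mtime check; the flag path is a stand-in for illustration, not the real /tmp/teuthology-restart-workers.

# Standalone sketch of the sentinel() check driving the restart/stop logic above:
# a flag file triggers only when it is touched *after* the process started.
import os
import tempfile
import time
from datetime import datetime

start_time = datetime.utcnow()

def sentinel(path):
    # Same logic as the worker's sentinel(): compare file mtime to process start.
    if not os.path.exists(path):
        return False
    return datetime.utcfromtimestamp(os.path.getmtime(path)) > start_time

flag = os.path.join(tempfile.gettempdir(), "restart-workers-demo")  # stand-in path
print(sentinel(flag))   # False while the flag file is absent
time.sleep(1.1)         # dodge coarse (1s) mtime resolution on some filesystems
open(flag, "a").close() # "touch" the flag, as an operator would
print(sentinel(flag))   # True: the main loop would now call restart()
os.remove(flag)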
defract_test.go
package defract import ( "bytes" "math" "math/rand" "os/exec" "reflect" "testing" "time" "unsafe" "github.com/go-test/deep" ) type mockType string func (v mockType) String() string { return string(v) } func (v mockType) FortyTwo() int { return 42 } func
(t *testing.T) { const sample = "Hello, 世界" eqType := func(t *testing.T, typ, expect reflect.Type) { t.Helper() if typ != expect { t.Error("expected type string, got", typ) } } // eq verifies the backing array. eq := func(t *testing.T, ptr unsafe.Pointer) { t.Helper() var ( length = (*reflect.StringHeader)(ptr).Len pointer = unsafe.Pointer((*reflect.StringHeader)(ptr).Data) ) if length != len(sample) { t.Fatalf("expected length %d, got %d", len(sample), length) } array := unsafe.Slice((*byte)(pointer), len(sample)) if string(array) != sample { t.Fatalf("expected %q, got %q", sample, string(array)) } } t.Run("nil", func(t *testing.T) { typ, got := UnderlyingPtr(nil) if got != nil { t.Fatal("unexpected non-nil ptr returned from nil") } if typ != nil { t.Fatal("unexpected type non-nil") } }) t.Run("nested-nil", func(t *testing.T) { typ, got := UnderlyingPtr((**string)(nil)) if got != nil { t.Fatal("unexpected non-nil ptr returned from nil") } if typ != nil { t.Fatal("unexpected type non-nil") } }) t.Run("0-level", func(t *testing.T) { typ, got := UnderlyingPtr(sample) if got != nil { t.Fatal("unexpected non-nil ptr returned from nil") } if typ != nil { t.Fatal("unexpected type non-nil") } }) t.Run("method-0-level", func(t *testing.T) { typ, got := UnderlyingPtr(mockType(sample)) if got != nil { t.Fatal("unexpected non-nil ptr returned from nil") } if typ != nil { t.Fatal("unexpected type non-nil") } }) check := func(t *testing.T, v, expectTyp interface{}) { t.Helper() typ, got := UnderlyingPtr(v) eqType(t, typ, reflect.TypeOf(expectTyp)) eq(t, got) } t.Run("1-level", func(t *testing.T) { str := sample check(t, &str, sample) }) t.Run("2-level", func(t *testing.T) { str := sample ptr := &str check(t, &ptr, sample) }) t.Run("method-1-level", func(t *testing.T) { str := mockType(sample) check(t, &str, mockType("")) }) t.Run("method-2-level", func(t *testing.T) { str := mockType(sample) ptr := &str check(t, &ptr, mockType("")) }) } func TestAllocIndirect(t *testing.T) { // skip alloc test, since that's tested in driver/tests/. 
t.Run("noalloc", func(t *testing.T) { str := "hello, world" ptr1 := &str ptr2 := &ptr1 typ, ptr := AllocIndirect(reflect.TypeOf(ptr2), unsafe.Pointer(&ptr2)) if typ != reflect.TypeOf("") { t.Fatalf("unexpected (not string) type: %v", typ) } if ptr != unsafe.Pointer(&str) { t.Fatalf("unexpected ptr returned: expected %p got %p", unsafe.Pointer(&str), ptr) } }) } func TestIsZero(t *testing.T) { testWithSize := func(t *testing.T, size, offset int) { t.Helper() value := make([]byte, size) if !IsZero(unsafe.Pointer(&value[0]), uintptr(len(value))) { t.Error("value is not eq when it should be") } value[size-offset] = '\x01' if IsZero(unsafe.Pointer(&value[0]), uintptr(len(value))) { t.Error("value is eq when it should not be") } } t.Run("1024_end", func(t *testing.T) { testWithSize(t, 1024, 1) }) t.Run("1024_start", func(t *testing.T) { testWithSize(t, 1024, 1024/4) }) t.Run("4096_end", func(t *testing.T) { testWithSize(t, 4096, 1) }) t.Run("4096_start", func(t *testing.T) { testWithSize(t, 4096, 4096/4) }) t.Run("1024000_end", func(t *testing.T) { testWithSize(t, 1024000, 1) }) t.Run("1024000_start", func(t *testing.T) { testWithSize(t, 1024000, 1024000/4) }) t.Run("zlen_end", func(t *testing.T) { testWithSize(t, zeroesLen, 1) }) t.Run("zlen_start", func(t *testing.T) { testWithSize(t, zeroesLen, zeroesLen/4) }) t.Run("4096000_end", func(t *testing.T) { testWithSize(t, 4096000, 1) }) t.Run("4096000_start", func(t *testing.T) { testWithSize(t, 4096000, 4096000/4) }) } func TestZeroOut(t *testing.T) { rander := rand.New(rand.NewSource(time.Now().UnixNano())) testWithSize := func(t *testing.T, size int) { value := make([]byte, size) // Read until the values are not completely zero. We can use IsZeroBytes // because we've already tested it above. for IsZeroBytes(value) { _, err := rander.Read(value) if err != nil { t.Error("failed to math.rand Read:", err) return } } ZeroOutBytes(value) if !IsZeroBytes(value) { t.Log("ZeroOutBytes fail, last 0 at", bytes.LastIndexByte(value, '0')) t.Error("ZeroOutBytes did not zero out completely") } } t.Run("1024", func(t *testing.T) { testWithSize(t, 1024) }) t.Run("4096", func(t *testing.T) { testWithSize(t, 4096) }) t.Run("1024000", func(t *testing.T) { testWithSize(t, 1024000) }) t.Run("zeroesLen", func(t *testing.T) { testWithSize(t, zeroesLen) }) t.Run("4096000", func(t *testing.T) { testWithSize(t, 4096000) }) } func TestIsLittleEndian(t *testing.T) { lscpu := exec.Command("lscpu") o, err := lscpu.Output() if err != nil { t.Skip("no lscpu:", err) } for _, line := range bytes.Split(o, []byte("\n")) { if !bytes.Contains(line, []byte("Byte Order:")) { continue } words := bytes.Fields(line) lastTwo := bytes.Join(words[len(words)-2:], []byte(" ")) switch string(lastTwo) { case "Little Endian": if !IsLittleEndian { t.Fatal("not little endian") } return case "Big Endian": if IsLittleEndian { t.Fatal("not big endian") } return default: t.Skipf("unknown Byte Order value %q", words) } } t.Skip("unrecognized lscpu output") } func TestWithinSlice(t *testing.T) { outer := make([]byte, 0, 50) inner := outer[2:34] if !WithinBytes(outer, inner) { t.Fatal("unexpected outer/inner result") } if WithinBytes(outer, make([]byte, 0, 10)) { t.Fatal("new slice is incorrectly within outer") } } func TestNumberLE(t *testing.T) { if !IsLittleEndian { t.Skip("skipping NumberLE test, since not LE machine") } // Restore LE after done. 
t.Cleanup(func() { IsLittleEndian = true }) var tests = []interface{}{ uint8('c'), uint16(math.MaxUint16), uint32(math.MaxUint32), uint64(math.MaxUint64), int8('c'), int16(math.MaxInt16), int32(math.MaxInt32), int64(math.MaxInt64), float32(math.Inf(-1)), float32(math.Inf(+1)), float64(math.Inf(-1)), float64(math.Inf(+1)), complex64(5 + 10i), complex128(5 + 10i), } for _, test := range tests { ptr := InterfacePtr(test) typ := reflect.TypeOf(test) IsLittleEndian = true le := NumberLE(typ.Kind(), ptr) IsLittleEndian = false be := NumberLE(typ.Kind(), ptr) if !bytes.Equal(le, be) { t.Fatalf("big endian != little endian output\nLE: %v\nBE: %v", le, be) } for name, input := range map[string][]byte{"LE": le, "BE": be} { valLE := reflect.New(typ) IsLittleEndian = true if !ReadNumberLE(input, typ.Kind(), unsafe.Pointer(valLE.Pointer())) { t.Fatalf("ReadNumberLE fail on Little Endian with %s input", name) } if v := valLE.Elem().Interface(); v != test { t.Fatalf("ReadNumberLE Little Endian output differs: %v != %v", v, test) } valBE := reflect.New(typ) IsLittleEndian = false if !ReadNumberLE(input, typ.Kind(), unsafe.Pointer(valBE.Pointer())) { t.Fatalf("ReadNumberLE fail on Big Endian with %s input", name) } if v := valBE.Elem().Interface(); v != test { t.Fatalf("ReadNumberLE Big Endian output differs: %v != %v", v, test) } } } } type testStruct struct { Field1 string Field2 string Foo int Bar int Astolfo anotherStruct } type anotherStruct struct { Astolfo string } func TestStructInfo(t *testing.T) { expect := StructInfo{ Type: reflect.TypeOf(testStruct{}), RawSchema: []byte("Field1\x00Field2\x00Foo\x00Bar\x00Astolfo"), Fields: []StructField{ { Type: reflect.TypeOf(""), Kind: reflect.String, Name: []byte("Field1"), Size: 2 * unsafe.Sizeof(0), Offset: 0, }, { Type: reflect.TypeOf(""), Kind: reflect.String, Name: []byte("Field2"), Size: 2 * unsafe.Sizeof(0), Offset: 2 * unsafe.Sizeof(0), }, { Type: reflect.TypeOf(int(0)), Kind: reflect.Int, Name: []byte("Foo"), Size: unsafe.Sizeof(0), Offset: 4 * unsafe.Sizeof(0), }, { Type: reflect.TypeOf(int(0)), Kind: reflect.Int, Name: []byte("Bar"), Size: unsafe.Sizeof(0), Offset: 5 * unsafe.Sizeof(0), }, { Type: reflect.TypeOf(anotherStruct{}), Kind: reflect.Struct, Name: []byte("Astolfo"), Size: 2 * unsafe.Sizeof(0), Offset: 6 * unsafe.Sizeof(0), }, }, } got := GetStructInfo(reflect.TypeOf(testStruct{})) for _, ineq := range deep.Equal(&expect, got) { t.Errorf("expect/got: %q", ineq) } }
TestUnderlyingPtr
customer_service.pb.go
// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 // protoc v3.15.3 // source: google/ads/googleads/v7/services/customer_service.proto package services import ( context "context" proto "github.com/golang/protobuf/proto" enums "github.com/dictav/go-genproto-googleads/pb/v7/enums" resources "github.com/dictav/go-genproto-googleads/pb/v7/resources" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 // Request message for [CustomerService.GetCustomer][google.ads.googleads.v7.services.CustomerService.GetCustomer]. type GetCustomerRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Required. The resource name of the customer to fetch. ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` } func (x *GetCustomerRequest) Reset() { *x = GetCustomerRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetCustomerRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetCustomerRequest) ProtoMessage() {} func (x *GetCustomerRequest) ProtoReflect() protoreflect.Message { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetCustomerRequest.ProtoReflect.Descriptor instead. func (*GetCustomerRequest) Descriptor() ([]byte, []int) { return file_google_ads_googleads_v7_services_customer_service_proto_rawDescGZIP(), []int{0} } func (x *GetCustomerRequest) GetResourceName() string { if x != nil { return x.ResourceName } return "" } // Request message for [CustomerService.MutateCustomer][google.ads.googleads.v7.services.CustomerService.MutateCustomer]. type MutateCustomerRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Required. The ID of the customer being modified. 
CustomerId string `protobuf:"bytes,1,opt,name=customer_id,json=customerId,proto3" json:"customer_id,omitempty"` // Required. The operation to perform on the customer Operation *CustomerOperation `protobuf:"bytes,4,opt,name=operation,proto3" json:"operation,omitempty"` // If true, the request is validated but not executed. Only errors are // returned, not results. ValidateOnly bool `protobuf:"varint,5,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` // The response content type setting. Determines whether the mutable resource // or just the resource name should be returned post mutation. ResponseContentType enums.ResponseContentTypeEnum_ResponseContentType `protobuf:"varint,6,opt,name=response_content_type,json=responseContentType,proto3,enum=google.ads.googleads.v7.enums.ResponseContentTypeEnum_ResponseContentType" json:"response_content_type,omitempty"` } func (x *MutateCustomerRequest) Reset() { *x = MutateCustomerRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *MutateCustomerRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*MutateCustomerRequest) ProtoMessage() {} func (x *MutateCustomerRequest) ProtoReflect() protoreflect.Message { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use MutateCustomerRequest.ProtoReflect.Descriptor instead. func (*MutateCustomerRequest) Descriptor() ([]byte, []int) { return file_google_ads_googleads_v7_services_customer_service_proto_rawDescGZIP(), []int{1} } func (x *MutateCustomerRequest) GetCustomerId() string { if x != nil { return x.CustomerId } return "" } func (x *MutateCustomerRequest) GetOperation() *CustomerOperation { if x != nil { return x.Operation } return nil } func (x *MutateCustomerRequest) GetValidateOnly() bool { if x != nil { return x.ValidateOnly } return false } func (x *MutateCustomerRequest) GetResponseContentType() enums.ResponseContentTypeEnum_ResponseContentType { if x != nil { return x.ResponseContentType } return enums.ResponseContentTypeEnum_UNSPECIFIED } // Request message for [CustomerService.CreateCustomerClient][google.ads.googleads.v7.services.CustomerService.CreateCustomerClient]. type CreateCustomerClientRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Required. The ID of the Manager under whom client customer is being created. CustomerId string `protobuf:"bytes,1,opt,name=customer_id,json=customerId,proto3" json:"customer_id,omitempty"` // Required. The new client customer to create. The resource name on this customer // will be ignored. CustomerClient *resources.Customer `protobuf:"bytes,2,opt,name=customer_client,json=customerClient,proto3" json:"customer_client,omitempty"` // Email address of the user who should be invited on the created client // customer. Accessible only to customers on the allow-list. EmailAddress *string `protobuf:"bytes,5,opt,name=email_address,json=emailAddress,proto3,oneof" json:"email_address,omitempty"` // The proposed role of user on the created client customer. // Accessible only to customers on the allow-list. 
AccessRole enums.AccessRoleEnum_AccessRole `protobuf:"varint,4,opt,name=access_role,json=accessRole,proto3,enum=google.ads.googleads.v7.enums.AccessRoleEnum_AccessRole" json:"access_role,omitempty"` // If true, the request is validated but not executed. Only errors are // returned, not results. ValidateOnly bool `protobuf:"varint,6,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` } func (x *CreateCustomerClientRequest) Reset() { *x = CreateCustomerClientRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CreateCustomerClientRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*CreateCustomerClientRequest) ProtoMessage() {} func (x *CreateCustomerClientRequest) ProtoReflect() protoreflect.Message { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CreateCustomerClientRequest.ProtoReflect.Descriptor instead. func (*CreateCustomerClientRequest) Descriptor() ([]byte, []int) { return file_google_ads_googleads_v7_services_customer_service_proto_rawDescGZIP(), []int{2} } func (x *CreateCustomerClientRequest) GetCustomerId() string { if x != nil { return x.CustomerId } return "" } func (x *CreateCustomerClientRequest) GetCustomerClient() *resources.Customer { if x != nil { return x.CustomerClient } return nil } func (x *CreateCustomerClientRequest) GetEmailAddress() string { if x != nil && x.EmailAddress != nil { return *x.EmailAddress } return "" } func (x *CreateCustomerClientRequest) GetAccessRole() enums.AccessRoleEnum_AccessRole { if x != nil { return x.AccessRole } return enums.AccessRoleEnum_UNSPECIFIED } func (x *CreateCustomerClientRequest) GetValidateOnly() bool { if x != nil { return x.ValidateOnly } return false } // A single update on a customer. type CustomerOperation struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Mutate operation. Only updates are supported for customer. Update *resources.Customer `protobuf:"bytes,1,opt,name=update,proto3" json:"update,omitempty"` // FieldMask that determines which resource fields are modified in an update. UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *CustomerOperation) Reset() { *x = CustomerOperation{} if protoimpl.UnsafeEnabled { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CustomerOperation) String() string { return protoimpl.X.MessageStringOf(x) } func (*CustomerOperation) ProtoMessage() {} func (x *CustomerOperation) ProtoReflect() protoreflect.Message { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CustomerOperation.ProtoReflect.Descriptor instead. 
func (*CustomerOperation) Descriptor() ([]byte, []int) { return file_google_ads_googleads_v7_services_customer_service_proto_rawDescGZIP(), []int{3} } func (x *CustomerOperation) GetUpdate() *resources.Customer { if x != nil { return x.Update } return nil } func (x *CustomerOperation) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } return nil } // Response message for CreateCustomerClient mutate. type CreateCustomerClientResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The resource name of the newly created customer client. ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` // Link for inviting user to access the created customer. Accessible to // allowlisted customers only. InvitationLink string `protobuf:"bytes,3,opt,name=invitation_link,json=invitationLink,proto3" json:"invitation_link,omitempty"` } func (x *CreateCustomerClientResponse) Reset() { *x = CreateCustomerClientResponse{} if protoimpl.UnsafeEnabled { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CreateCustomerClientResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*CreateCustomerClientResponse) ProtoMessage() {} func (x *CreateCustomerClientResponse) ProtoReflect() protoreflect.Message { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CreateCustomerClientResponse.ProtoReflect.Descriptor instead. func (*CreateCustomerClientResponse) Descriptor() ([]byte, []int) { return file_google_ads_googleads_v7_services_customer_service_proto_rawDescGZIP(), []int{4} } func (x *CreateCustomerClientResponse) GetResourceName() string { if x != nil { return x.ResourceName } return "" } func (x *CreateCustomerClientResponse) GetInvitationLink() string { if x != nil { return x.InvitationLink } return "" } // Response message for customer mutate. type MutateCustomerResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Result for the mutate. Result *MutateCustomerResult `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` } func (x *MutateCustomerResponse) Reset() { *x = MutateCustomerResponse{} if protoimpl.UnsafeEnabled { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *MutateCustomerResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*MutateCustomerResponse) ProtoMessage() {} func (x *MutateCustomerResponse) ProtoReflect() protoreflect.Message { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use MutateCustomerResponse.ProtoReflect.Descriptor instead. 
func (*MutateCustomerResponse) Descriptor() ([]byte, []int) { return file_google_ads_googleads_v7_services_customer_service_proto_rawDescGZIP(), []int{5} } func (x *MutateCustomerResponse) GetResult() *MutateCustomerResult { if x != nil { return x.Result } return nil } // The result for the customer mutate. type MutateCustomerResult struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Returned for successful operations. ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` // The mutated customer with only mutable fields after mutate. The fields will // only be returned when response_content_type is set to "MUTABLE_RESOURCE". Customer *resources.Customer `protobuf:"bytes,2,opt,name=customer,proto3" json:"customer,omitempty"` } func (x *MutateCustomerResult) Reset() { *x = MutateCustomerResult{} if protoimpl.UnsafeEnabled { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *MutateCustomerResult) String() string { return protoimpl.X.MessageStringOf(x) } func (*MutateCustomerResult) ProtoMessage() {} func (x *MutateCustomerResult) ProtoReflect() protoreflect.Message { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use MutateCustomerResult.ProtoReflect.Descriptor instead. func (*MutateCustomerResult) Descriptor() ([]byte, []int) { return file_google_ads_googleads_v7_services_customer_service_proto_rawDescGZIP(), []int{6} } func (x *MutateCustomerResult) GetResourceName() string { if x != nil { return x.ResourceName } return "" } func (x *MutateCustomerResult) GetCustomer() *resources.Customer { if x != nil { return x.Customer } return nil } // Request message for [CustomerService.ListAccessibleCustomers][google.ads.googleads.v7.services.CustomerService.ListAccessibleCustomers]. type ListAccessibleCustomersRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *ListAccessibleCustomersRequest) Reset() { *x = ListAccessibleCustomersRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListAccessibleCustomersRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListAccessibleCustomersRequest) ProtoMessage() {} func (x *ListAccessibleCustomersRequest) ProtoReflect() protoreflect.Message { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListAccessibleCustomersRequest.ProtoReflect.Descriptor instead. func (*ListAccessibleCustomersRequest) Descriptor() ([]byte, []int) { return file_google_ads_googleads_v7_services_customer_service_proto_rawDescGZIP(), []int{7}
} // Response message for [CustomerService.ListAccessibleCustomers][google.ads.googleads.v7.services.CustomerService.ListAccessibleCustomers]. type ListAccessibleCustomersResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Resource name of customers directly accessible by the // user authenticating the call. ResourceNames []string `protobuf:"bytes,1,rep,name=resource_names,json=resourceNames,proto3" json:"resource_names,omitempty"` } func (x *ListAccessibleCustomersResponse) Reset() { *x = ListAccessibleCustomersResponse{} if protoimpl.UnsafeEnabled { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListAccessibleCustomersResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListAccessibleCustomersResponse) ProtoMessage() {} func (x *ListAccessibleCustomersResponse) ProtoReflect() protoreflect.Message { mi := &file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListAccessibleCustomersResponse.ProtoReflect.Descriptor instead. func (*ListAccessibleCustomersResponse) Descriptor() ([]byte, []int) { return file_google_ads_googleads_v7_services_customer_service_proto_rawDescGZIP(), []int{8} } func (x *ListAccessibleCustomersResponse) GetResourceNames() []string { if x != nil { return x.ResourceNames } return nil } var File_google_ads_googleads_v7_services_customer_service_proto protoreflect.FileDescriptor var file_google_ads_googleads_v7_services_customer_service_proto_rawDesc = []byte{ 0x0a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x37, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x37, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x37, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x37, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x64, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xba, 0x02, 0x0a, 0x15, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x12, 0x56, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x7e, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x45, 0x6e, 0x75, 0x6d, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0xda, 0x02, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 
0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x49, 0x64, 0x12, 0x59, 0x0a, 0x0f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x59, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x6f, 0x6c, 0x65, 0x45, 0x6e, 0x75, 0x6d, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x95, 0x01, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x6c, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x6e, 0x6b, 0x22, 0x68, 0x0a, 0x16, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x14, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x08, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x08, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x22, 0x20, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x48, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x32, 0x98, 0x07, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa9, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x22, 0x37, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x37, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xcc, 0x01, 0x0a, 0x0e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, 0x24, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x3d, 0x2a, 0x7d, 0x3a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xcd, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x12, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x3a, 0x6c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x12, 0xf2, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x37, 0x2f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x3d, 0x2a, 0x7d, 0x3a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x1b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x45, 0xca, 0x41, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x27, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x61, 0x64, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x42, 0xfb, 0x01, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x37, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x14, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x37, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x3b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x41, 0xaa, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x2e, 0x56, 0x37, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xca, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x73, 0x5c, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x5c, 0x56, 0x37, 0x5c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xea, 0x02, 0x24, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x56, 0x37, 0x3a, 0x3a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_google_ads_googleads_v7_services_customer_service_proto_rawDescOnce sync.Once file_google_ads_googleads_v7_services_customer_service_proto_rawDescData = file_google_ads_googleads_v7_services_customer_service_proto_rawDesc ) func file_google_ads_googleads_v7_services_customer_service_proto_rawDescGZIP() []byte { file_google_ads_googleads_v7_services_customer_service_proto_rawDescOnce.Do(func() { file_google_ads_googleads_v7_services_customer_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_ads_googleads_v7_services_customer_service_proto_rawDescData) }) return file_google_ads_googleads_v7_services_customer_service_proto_rawDescData } var file_google_ads_googleads_v7_services_customer_service_proto_msgTypes = make([]protoimpl.MessageInfo, 9) var file_google_ads_googleads_v7_services_customer_service_proto_goTypes = []interface{}{ (*GetCustomerRequest)(nil), // 0: google.ads.googleads.v7.services.GetCustomerRequest (*MutateCustomerRequest)(nil), // 1: google.ads.googleads.v7.services.MutateCustomerRequest (*CreateCustomerClientRequest)(nil), // 2: google.ads.googleads.v7.services.CreateCustomerClientRequest (*CustomerOperation)(nil), // 3: google.ads.googleads.v7.services.CustomerOperation (*CreateCustomerClientResponse)(nil), // 4: google.ads.googleads.v7.services.CreateCustomerClientResponse (*MutateCustomerResponse)(nil), // 5: google.ads.googleads.v7.services.MutateCustomerResponse (*MutateCustomerResult)(nil), // 6: google.ads.googleads.v7.services.MutateCustomerResult (*ListAccessibleCustomersRequest)(nil), // 7: google.ads.googleads.v7.services.ListAccessibleCustomersRequest (*ListAccessibleCustomersResponse)(nil), // 8: 
google.ads.googleads.v7.services.ListAccessibleCustomersResponse (enums.ResponseContentTypeEnum_ResponseContentType)(0), // 9: google.ads.googleads.v7.enums.ResponseContentTypeEnum.ResponseContentType (*resources.Customer)(nil), // 10: google.ads.googleads.v7.resources.Customer (enums.AccessRoleEnum_AccessRole)(0), // 11: google.ads.googleads.v7.enums.AccessRoleEnum.AccessRole (*fieldmaskpb.FieldMask)(nil), // 12: google.protobuf.FieldMask } var file_google_ads_googleads_v7_services_customer_service_proto_depIdxs = []int32{ 3, // 0: google.ads.googleads.v7.services.MutateCustomerRequest.operation:type_name -> google.ads.googleads.v7.services.CustomerOperation 9, // 1: google.ads.googleads.v7.services.MutateCustomerRequest.response_content_type:type_name -> google.ads.googleads.v7.enums.ResponseContentTypeEnum.ResponseContentType 10, // 2: google.ads.googleads.v7.services.CreateCustomerClientRequest.customer_client:type_name -> google.ads.googleads.v7.resources.Customer 11, // 3: google.ads.googleads.v7.services.CreateCustomerClientRequest.access_role:type_name -> google.ads.googleads.v7.enums.AccessRoleEnum.AccessRole 10, // 4: google.ads.googleads.v7.services.CustomerOperation.update:type_name -> google.ads.googleads.v7.resources.Customer 12, // 5: google.ads.googleads.v7.services.CustomerOperation.update_mask:type_name -> google.protobuf.FieldMask 6, // 6: google.ads.googleads.v7.services.MutateCustomerResponse.result:type_name -> google.ads.googleads.v7.services.MutateCustomerResult 10, // 7: google.ads.googleads.v7.services.MutateCustomerResult.customer:type_name -> google.ads.googleads.v7.resources.Customer 0, // 8: google.ads.googleads.v7.services.CustomerService.GetCustomer:input_type -> google.ads.googleads.v7.services.GetCustomerRequest 1, // 9: google.ads.googleads.v7.services.CustomerService.MutateCustomer:input_type -> google.ads.googleads.v7.services.MutateCustomerRequest 7, // 10: google.ads.googleads.v7.services.CustomerService.ListAccessibleCustomers:input_type -> google.ads.googleads.v7.services.ListAccessibleCustomersRequest 2, // 11: google.ads.googleads.v7.services.CustomerService.CreateCustomerClient:input_type -> google.ads.googleads.v7.services.CreateCustomerClientRequest 10, // 12: google.ads.googleads.v7.services.CustomerService.GetCustomer:output_type -> google.ads.googleads.v7.resources.Customer 5, // 13: google.ads.googleads.v7.services.CustomerService.MutateCustomer:output_type -> google.ads.googleads.v7.services.MutateCustomerResponse 8, // 14: google.ads.googleads.v7.services.CustomerService.ListAccessibleCustomers:output_type -> google.ads.googleads.v7.services.ListAccessibleCustomersResponse 4, // 15: google.ads.googleads.v7.services.CustomerService.CreateCustomerClient:output_type -> google.ads.googleads.v7.services.CreateCustomerClientResponse 12, // [12:16] is the sub-list for method output_type 8, // [8:12] is the sub-list for method input_type 8, // [8:8] is the sub-list for extension type_name 8, // [8:8] is the sub-list for extension extendee 0, // [0:8] is the sub-list for field type_name } func init() { file_google_ads_googleads_v7_services_customer_service_proto_init() } func file_google_ads_googleads_v7_services_customer_service_proto_init() { if File_google_ads_googleads_v7_services_customer_service_proto != nil { return } if !protoimpl.UnsafeEnabled { file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCustomerRequest); i { case 0: return &v.state case 
1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MutateCustomerRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateCustomerClientRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CustomerOperation); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateCustomerClientResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MutateCustomerResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MutateCustomerResult); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListAccessibleCustomersRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListAccessibleCustomersResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } file_google_ads_googleads_v7_services_customer_service_proto_msgTypes[2].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_ads_googleads_v7_services_customer_service_proto_rawDesc, NumEnums: 0, NumMessages: 9, NumExtensions: 0, NumServices: 1, }, GoTypes: file_google_ads_googleads_v7_services_customer_service_proto_goTypes, DependencyIndexes: file_google_ads_googleads_v7_services_customer_service_proto_depIdxs, MessageInfos: file_google_ads_googleads_v7_services_customer_service_proto_msgTypes, }.Build() File_google_ads_googleads_v7_services_customer_service_proto = out.File file_google_ads_googleads_v7_services_customer_service_proto_rawDesc = nil file_google_ads_googleads_v7_services_customer_service_proto_goTypes = nil file_google_ads_googleads_v7_services_customer_service_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. 
const _ = grpc.SupportPackageIsVersion6 // CustomerServiceClient is the client API for CustomerService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type CustomerServiceClient interface { // Returns the requested customer in full detail. // // List of thrown errors: // [AuthenticationError]() // [AuthorizationError]() // [HeaderError]() // [InternalError]() // [QuotaError]() // [RequestError]() GetCustomer(ctx context.Context, in *GetCustomerRequest, opts ...grpc.CallOption) (*resources.Customer, error) // Updates a customer. Operation statuses are returned. // // List of thrown errors: // [AuthenticationError]() // [AuthorizationError]() // [DatabaseError]() // [FieldMaskError]() // [HeaderError]() // [InternalError]() // [QuotaError]() // [RequestError]() // [UrlFieldError]() MutateCustomer(ctx context.Context, in *MutateCustomerRequest, opts ...grpc.CallOption) (*MutateCustomerResponse, error) // Returns resource names of customers directly accessible by the // user authenticating the call. // // List of thrown errors: // [AuthenticationError]() // [AuthorizationError]() // [HeaderError]() // [InternalError]() // [QuotaError]() // [RequestError]() ListAccessibleCustomers(ctx context.Context, in *ListAccessibleCustomersRequest, opts ...grpc.CallOption) (*ListAccessibleCustomersResponse, error) // Creates a new client under manager. The new client customer is returned. // // List of thrown errors: // [AccessInvitationError]() // [AuthenticationError]() // [AuthorizationError]() // [CurrencyCodeError]() // [HeaderError]() // [InternalError]() // [ManagerLinkError]() // [QuotaError]() // [RequestError]() // [StringLengthError]() // [TimeZoneError]() CreateCustomerClient(ctx context.Context, in *CreateCustomerClientRequest, opts ...grpc.CallOption) (*CreateCustomerClientResponse, error) } type customerServiceClient struct { cc grpc.ClientConnInterface } func NewCustomerServiceClient(cc grpc.ClientConnInterface) CustomerServiceClient { return &customerServiceClient{cc} } func (c *customerServiceClient) GetCustomer(ctx context.Context, in *GetCustomerRequest, opts ...grpc.CallOption) (*resources.Customer, error) { out := new(resources.Customer) err := c.cc.Invoke(ctx, "/google.ads.googleads.v7.services.CustomerService/GetCustomer", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *customerServiceClient) MutateCustomer(ctx context.Context, in *MutateCustomerRequest, opts ...grpc.CallOption) (*MutateCustomerResponse, error) { out := new(MutateCustomerResponse) err := c.cc.Invoke(ctx, "/google.ads.googleads.v7.services.CustomerService/MutateCustomer", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *customerServiceClient) ListAccessibleCustomers(ctx context.Context, in *ListAccessibleCustomersRequest, opts ...grpc.CallOption) (*ListAccessibleCustomersResponse, error) { out := new(ListAccessibleCustomersResponse) err := c.cc.Invoke(ctx, "/google.ads.googleads.v7.services.CustomerService/ListAccessibleCustomers", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *customerServiceClient) CreateCustomerClient(ctx context.Context, in *CreateCustomerClientRequest, opts ...grpc.CallOption) (*CreateCustomerClientResponse, error) { out := new(CreateCustomerClientResponse) err := c.cc.Invoke(ctx, "/google.ads.googleads.v7.services.CustomerService/CreateCustomerClient", in, out, opts...) 
if err != nil { return nil, err } return out, nil } // CustomerServiceServer is the server API for CustomerService service. type CustomerServiceServer interface { // Returns the requested customer in full detail. // // List of thrown errors: // [AuthenticationError]() // [AuthorizationError]() // [HeaderError]() // [InternalError]() // [QuotaError]() // [RequestError]() GetCustomer(context.Context, *GetCustomerRequest) (*resources.Customer, error) // Updates a customer. Operation statuses are returned. // // List of thrown errors: // [AuthenticationError]() // [AuthorizationError]() // [DatabaseError]() // [FieldMaskError]() // [HeaderError]() // [InternalError]() // [QuotaError]() // [RequestError]() // [UrlFieldError]() MutateCustomer(context.Context, *MutateCustomerRequest) (*MutateCustomerResponse, error) // Returns resource names of customers directly accessible by the // user authenticating the call. // // List of thrown errors: // [AuthenticationError]() // [AuthorizationError]() // [HeaderError]() // [InternalError]() // [QuotaError]() // [RequestError]() ListAccessibleCustomers(context.Context, *ListAccessibleCustomersRequest) (*ListAccessibleCustomersResponse, error) // Creates a new client under manager. The new client customer is returned. // // List of thrown errors: // [AccessInvitationError]() // [AuthenticationError]() // [AuthorizationError]() // [CurrencyCodeError]() // [HeaderError]() // [InternalError]() // [ManagerLinkError]() // [QuotaError]() // [RequestError]() // [StringLengthError]() // [TimeZoneError]() CreateCustomerClient(context.Context, *CreateCustomerClientRequest) (*CreateCustomerClientResponse, error) } // UnimplementedCustomerServiceServer can be embedded to have forward compatible implementations. type UnimplementedCustomerServiceServer struct { } func (*UnimplementedCustomerServiceServer) GetCustomer(context.Context, *GetCustomerRequest) (*resources.Customer, error) { return nil, status.Errorf(codes.Unimplemented, "method GetCustomer not implemented") } func (*UnimplementedCustomerServiceServer) MutateCustomer(context.Context, *MutateCustomerRequest) (*MutateCustomerResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method MutateCustomer not implemented") } func (*UnimplementedCustomerServiceServer) ListAccessibleCustomers(context.Context, *ListAccessibleCustomersRequest) (*ListAccessibleCustomersResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListAccessibleCustomers not implemented") } func (*UnimplementedCustomerServiceServer) CreateCustomerClient(context.Context, *CreateCustomerClientRequest) (*CreateCustomerClientResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateCustomerClient not implemented") } func RegisterCustomerServiceServer(s *grpc.Server, srv CustomerServiceServer) { s.RegisterService(&_CustomerService_serviceDesc, srv) } func _CustomerService_GetCustomer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetCustomerRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CustomerServiceServer).GetCustomer(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/google.ads.googleads.v7.services.CustomerService/GetCustomer", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CustomerServiceServer).GetCustomer(ctx, req.(*GetCustomerRequest)) } return interceptor(ctx, in, info, handler) } 
func _CustomerService_MutateCustomer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MutateCustomerRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CustomerServiceServer).MutateCustomer(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/google.ads.googleads.v7.services.CustomerService/MutateCustomer", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CustomerServiceServer).MutateCustomer(ctx, req.(*MutateCustomerRequest)) } return interceptor(ctx, in, info, handler) } func _CustomerService_ListAccessibleCustomers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListAccessibleCustomersRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CustomerServiceServer).ListAccessibleCustomers(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/google.ads.googleads.v7.services.CustomerService/ListAccessibleCustomers", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CustomerServiceServer).ListAccessibleCustomers(ctx, req.(*ListAccessibleCustomersRequest)) } return interceptor(ctx, in, info, handler) } func _CustomerService_CreateCustomerClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateCustomerClientRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CustomerServiceServer).CreateCustomerClient(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/google.ads.googleads.v7.services.CustomerService/CreateCustomerClient", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CustomerServiceServer).CreateCustomerClient(ctx, req.(*CreateCustomerClientRequest)) } return interceptor(ctx, in, info, handler) } var _CustomerService_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.ads.googleads.v7.services.CustomerService", HandlerType: (*CustomerServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "GetCustomer", Handler: _CustomerService_GetCustomer_Handler, }, { MethodName: "MutateCustomer", Handler: _CustomerService_MutateCustomer_Handler, }, { MethodName: "ListAccessibleCustomers", Handler: _CustomerService_ListAccessibleCustomers_Handler, }, { MethodName: "CreateCustomerClient", Handler: _CustomerService_CreateCustomerClient_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "google/ads/googleads/v7/services/customer_service.proto", }
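// A minimal usage sketch for the generated CustomerService client above — not a
// complete integration. The endpoint is the public Google Ads API host; real calls
// additionally require OAuth2 credentials and a developer-token header, which this
// sketch deliberately omits. All gRPC calls used here (grpc.Dial,
// grpc.WithTransportCredentials, credentials.NewTLS) are standard grpc-go APIs.
//
// package main
//
// import (
// 	"context"
// 	"crypto/tls"
// 	"log"
//
// 	services "github.com/dictav/go-genproto-googleads/pb/v7/services"
// 	"google.golang.org/grpc"
// 	"google.golang.org/grpc/credentials"
// )
//
// func main() {
// 	// TLS is mandatory on the public endpoint.
// 	conn, err := grpc.Dial(
// 		"googleads.googleapis.com:443",
// 		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),
// 	)
// 	if err != nil {
// 		log.Fatalf("dial: %v", err)
// 	}
// 	defer conn.Close()
//
// 	client := services.NewCustomerServiceClient(conn)
//
// 	// ListAccessibleCustomers takes an empty request and returns the
// 	// customer resource names visible to the authenticated user.
// 	resp, err := client.ListAccessibleCustomers(
// 		context.Background(),
// 		&services.ListAccessibleCustomersRequest{},
// 	)
// 	if err != nil {
// 		log.Fatalf("ListAccessibleCustomers: %v", err)
// 	}
// 	for _, name := range resp.GetResourceNames() {
// 		log.Println(name)
// 	}
// }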
col_decimal32_safe_gen.go
//go:build !(amd64 || arm64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. package proto import ( "encoding/binary" "github.com/go-faster/errors" ) var _ = binary.LittleEndian // clickHouse uses LittleEndian // DecodeColumn decodes Decimal32 rows from *Reader. func (c *ColDecimal32) DecodeColumn(r *Reader, rows int) error { if rows == 0 { return nil } const size = 32 / 8 data, err := r.ReadRaw(rows * size) if err != nil { return errors.Wrap(err, "read") } v := *c // Move bound check out of loop.
	//
	// See https://github.com/golang/go/issues/30945.
	_ = data[len(data)-size]
	for i := 0; i <= len(data)-size; i += size {
		v = append(v,
			Decimal32(binary.LittleEndian.Uint32(data[i:i+size])),
		)
	}
	*c = v
	return nil
}

// EncodeColumn encodes Decimal32 rows to *Buffer.
func (c ColDecimal32) EncodeColumn(b *Buffer) {
	if len(c) == 0 {
		return
	}
	const size = 32 / 8
	offset := len(b.Buf)
	b.Buf = append(b.Buf, make([]byte, size*len(c))...)
	for _, v := range c {
		binary.LittleEndian.PutUint32(
			b.Buf[offset:offset+size],
			uint32(v),
		)
		offset += size
	}
}
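// The `_ = data[len(data)-size]` line above is a standard Go bounds-check-elimination
// hint. Below is a self-contained sketch of the same pattern on plain uint32 values;
// the names (decodeUint32s) are illustrative only and not part of the proto package.
//
// package main
//
// import (
// 	"encoding/binary"
// 	"fmt"
// )
//
// // decodeUint32s mirrors the decode-loop shape of DecodeColumn above.
// func decodeUint32s(data []byte) []uint32 {
// 	const size = 4
// 	if len(data) < size {
// 		return nil
// 	}
// 	// One slice access up front lets the compiler prove that every
// 	// data[i : i+size] below is in range, removing the per-iteration
// 	// bounds checks (see https://github.com/golang/go/issues/30945).
// 	_ = data[len(data)-size]
// 	out := make([]uint32, 0, len(data)/size)
// 	for i := 0; i <= len(data)-size; i += size {
// 		out = append(out, binary.LittleEndian.Uint32(data[i:i+size]))
// 	}
// 	return out
// }
//
// func main() {
// 	fmt.Println(decodeUint32s([]byte{1, 0, 0, 0, 2, 0, 0, 0})) // [1 2]
// }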
error.rs
use std::fmt; /// Error types encountered while parsing #[derive(Debug)] pub enum ErrorType { /// The key of a map isn't a string BadKeyType, /// The data ended early EarlyEnd, /// Expected an array ExpectedArray, /// Expected a `,` in an array ExpectedArrayComma, /// expected an boolean ExpectedBoolean, /// Expected an enum ExpectedEnum, /// Expected a float ExpectedFloat, /// Expected an integer ExpectedInteger, /// Expected a map ExpectedMap, /// Expected an `:` to seperate key and value in an object ExpectedObjectColon, /// Expected a `,` in an object ExpectedMapComma, /// Expected the object to end ExpectedMapEnd, /// Expected a null ExpectedNull, /// Expected a number ExpectedNumber, /// Expected a signed number ExpectedSigned, /// Expected a string ExpectedString, /// Expected an unsigned number ExpectedUnsigned, /// Internal error InternalError, /// Invalid escape sequence InvalidEscape, /// Invalid exponent in a floating point number InvalidExponent, /// Invalid number InvalidNumber, /// Inbalid UTF8 codepoint InvalidUTF8, /// Invalid Unicode escape sequence InvalidUnicodeEscape, /// Inbalid Unicode codepoint InvlaidUnicodeCodepoint, /// Object Key isn't a string KeyMustBeAString, /// Non structural character NoStructure, /// Parser Erropr Parser, /// Early End Of File EOF, /// Generic serde error Serde(String), /// Generic syntax error Syntax, /// Training characters TrailingCharacters, /// Unexpected character UnexpectedCharacter, /// Unexpected end UnexpectedEnd, /// Unterminated string UnterminatedString, /// Expected Array elements ExpectedArrayContent, /// Expected Object elements ExpectedObjectContent, /// Expected Object Key ExpectedObjectKey, /// Overflow of a limited buffer Overflow, /// IO error IO(std::io::Error), } #[cfg_attr(tarpaulin, skip)] impl PartialEq for ErrorType { #[must_use] fn eq(&self, other: &Self) -> bool { match (self, other) { (Self::IO(_), Self::IO(_)) | (Self::BadKeyType, Self::BadKeyType) | (Self::EarlyEnd, Self::EarlyEnd) | (Self::ExpectedArray, Self::ExpectedArray) | (Self::ExpectedArrayComma, Self::ExpectedArrayComma) | (Self::ExpectedBoolean, Self::ExpectedBoolean) | (Self::ExpectedEnum, Self::ExpectedEnum) | (Self::ExpectedFloat, Self::ExpectedFloat) | (Self::ExpectedInteger, Self::ExpectedInteger) | (Self::ExpectedMap, Self::ExpectedMap) | (Self::ExpectedObjectColon, Self::ExpectedObjectColon) | (Self::ExpectedMapComma, Self::ExpectedMapComma) | (Self::ExpectedMapEnd, Self::ExpectedMapEnd) | (Self::ExpectedNull, Self::ExpectedNull) | (Self::ExpectedNumber, Self::ExpectedNumber) | (Self::ExpectedSigned, Self::ExpectedSigned) | (Self::ExpectedString, Self::ExpectedString) | (Self::ExpectedUnsigned, Self::ExpectedUnsigned) | (Self::InternalError, Self::InternalError) | (Self::InvalidEscape, Self::InvalidEscape) | (Self::InvalidExponent, Self::InvalidExponent) | (Self::InvalidNumber, Self::InvalidNumber) | (Self::InvalidUTF8, Self::InvalidUTF8) | (Self::InvalidUnicodeEscape, Self::InvalidUnicodeEscape) | (Self::InvlaidUnicodeCodepoint, Self::InvlaidUnicodeCodepoint) | (Self::KeyMustBeAString, Self::KeyMustBeAString) | (Self::NoStructure, Self::NoStructure) | (Self::Parser, Self::Parser) | (Self::EOF, Self::EOF) | (Self::Syntax, Self::Syntax) | (Self::TrailingCharacters, Self::TrailingCharacters) | (Self::UnexpectedCharacter, Self::UnexpectedCharacter) | (Self::UnexpectedEnd, Self::UnexpectedEnd) | (Self::UnterminatedString, Self::UnterminatedString) | (Self::ExpectedArrayContent, Self::ExpectedArrayContent) | (Self::ExpectedObjectContent, 
Self::ExpectedObjectContent) | (Self::ExpectedObjectKey, Self::ExpectedObjectKey) | (Self::Overflow, Self::Overflow) => true, (Self::Serde(s1), Self::Serde(s2)) => s1 == s2, _ => false, } } } /// Parser error #[derive(Debug, PartialEq)] pub struct Error { /// Byte index it was encountered at index: usize, /// Current character character: char, /// Tyep of error error: ErrorType, } impl Error { pub(crate) fn new(index: usize, character: char, error: ErrorType) -> Self { Self { index, character, error, } } pub(crate) fn generic(t: ErrorType) -> Self { Self { index: 0, character: '💩', //this is the poop emoji error: t, } } } #[cfg_attr(tarpaulin, skip)] impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "{:?} at character {} ('{}')", self.error, self.index, self.character ) } } #[cfg_attr(tarpaulin, skip)] impl From<Error> for std::io::Error { fn from(e: Error) -> Self { std::io::Error::new(std::io::ErrorKind::InvalidData, e) } } #[cfg(test)] mod test { use super::*; #[test] fn fmt() {
        let e = Error::generic(ErrorType::InternalError);
        assert_eq!(
            format!("{}", e),
            "InternalError at character 0 ('\u{1f4a9}')"
        )
    }
}
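A minimal usage sketch, not part of the original record, showing how this error type is meant to compose: `Display` renders the variant with its position, and the `From` impl lets `?` hand the error across an `std::io` boundary. The `parse_digit` helper is hypothetical, invented here only to have something that can fail, and it must live inside the same crate since `Error::new`/`Error::generic` are `pub(crate)`.

fn parse_digit(input: &str, index: usize) -> Result<u8, Error> {
    // Treat the index as a character offset for simplicity; report the
    // offending character together with its position on failure.
    let c = input
        .chars()
        .nth(index)
        .ok_or(Error::generic(ErrorType::EarlyEnd))?;
    c.to_digit(10)
        .map(|d| d as u8)
        .ok_or_else(|| Error::new(index, c, ErrorType::ExpectedInteger))
}

fn main() -> Result<(), std::io::Error> {
    // `?` converts via the `From<Error> for std::io::Error` impl above
    let d = parse_digit("42", 0)?;
    assert_eq!(d, 4);
    // A failure formats via Display, e.g. "ExpectedInteger at character 0 ('x')"
    assert!(parse_digit("x", 0).is_err());
    Ok(())
}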
booksearchresult.ts
export class
{
    libraryId: string;
    libraryName: string;
    libraryPicture: string;
    title: string;
    author: string;
    isbn: string;
    publishYear: number;
    coverPicture: string;
}
BookSearchResult
slice-const-param.rs
//[full] run-pass
// revisions: min full

#![cfg_attr(full, feature(const_generics))]
#![cfg_attr(full, allow(incomplete_features))]
#![cfg_attr(min, feature(min_const_generics))]

pub fn function_with_str<const STRING: &'static str>() -> &'static str {
    //[min]~^ ERROR `&'static str` is forbidden
    STRING
}

pub fn function_with_bytes<const BYTES: &'static [u8]>() -> &'static [u8] {
    //[min]~^ ERROR `&'static [u8]` is forbidden
    BYTES
}

pub fn main()
{
    assert_eq!(function_with_str::<"Rust">(), "Rust");
    assert_eq!(function_with_str::<"ℇ㇈↦">(), "ℇ㇈↦");
    assert_eq!(function_with_bytes::<b"AAAA">(), &[0x41, 0x41, 0x41, 0x41]);
    assert_eq!(function_with_bytes::<{&[0x41, 0x41, 0x41, 0x41]}>(), b"AAAA");
}
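A nightly-only companion sketch, not part of the compiletest file above: under the same `const_generics` feature gate (the `full` revision), a `&'static str` const parameter can also appear on a type, not just on functions. Illustrative only.

#![feature(const_generics)]
#![allow(incomplete_features)]

// A type tagged with a compile-time string literal.
struct Tagged<const NAME: &'static str>;

impl<const NAME: &'static str> Tagged<NAME> {
    // Recover the tag at runtime; it is baked in at compile time.
    fn name() -> &'static str {
        NAME
    }
}

fn main() {
    assert_eq!(Tagged::<"Rust">::name(), "Rust");
}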
enums.rs
// An enum has a fixed set of possible values, called variants
enum Movement {
    // Variants
    Up,
    Down,
    Left,
    Right,
}
fn move_avatar(m: Movement) {
    // Perform some actions
    // `match` is similar to switch in C/C++, but it must cover every variant
    match m {
        Movement::Up => println!("Avatar moving Up"),
        Movement::Down => println!("Avatar moving Down"),
        Movement::Left => println!("Avatar moving Left"),
        Movement::Right => println!("Avatar moving Right"),
    }
}

pub fn run() {
    let avatar1 = Movement::Left;
    let avatar2 = Movement::Up;
    let avatar3 = Movement::Right;
    let avatar4 = Movement::Down;

    move_avatar(avatar1);
    move_avatar(avatar2);
    move_avatar(avatar3);
    move_avatar(avatar4);
}
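A small follow-on sketch, not in the original file: because `match` is an expression, the same exhaustive dispatch can produce a value instead of printing. The `direction_symbol` helper is invented here for illustration.

// `match` evaluates to a value, so the function body is a single expression
fn direction_symbol(m: &Movement) -> char {
    match m {
        Movement::Up => '^',
        Movement::Down => 'v',
        Movement::Left => '<',
        Movement::Right => '>',
    }
}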
handlers.go
// © 2013 the AlePale Authors under the WTFPL. See AUTHORS for the list of authors.

package bot

import (
	"code.google.com/p/velour/irc"
	"errors"
	"fmt"
	"labix.org/v2/mgo/bson"
	"math/rand"
	"regexp"
	"strconv"
	"strings"
	"time"
)

// Handler is the interface used for compatibility with the Plugin interface
type Handler interface {
	Message(message Message) bool
	Event(kind string, message Message) bool
	BotMessage(message Message) bool
	Help(channel string, parts []string)
	RegisterWeb() *string
}

// checkHelp checks whether the user is asking for help and handles the request.
func (b *Bot) checkHelp(channel string, parts []string) {
	if len(parts) == 1 {
		// just print out a list of help topics
		topics := "Help topics: about variables"
		for name := range b.Plugins {
			topics = fmt.Sprintf("%s, %s", topics, name)
		}
		b.SendMessage(channel, topics)
	} else {
		// trigger the proper plugin's help response
		if parts[1] == "about" {
			b.Help(channel, parts)
			return
		}
		if parts[1] == "variables" {
			b.listVars(channel, parts)
			return
		}
		plugin := b.Plugins[parts[1]]
		if plugin != nil {
			plugin.Help(channel, parts)
		} else {
			msg := fmt.Sprintf("I'm sorry, I don't know what %s is!", parts[1])
			b.SendMessage(channel, msg)
		}
	}
}

// isCmd checks whether the message is a command and returns it with the
// command prefix or bot address stripped.
func (b *Bot) isCmd(message string) (bool, string) {
	cmdc := b.Config.CommandChar
	botnick := strings.ToLower(b.Config.Nick)
	iscmd := false
	lowerMessage := strings.ToLower(message)

	if strings.HasPrefix(lowerMessage, cmdc) && len(cmdc) > 0 {
		iscmd = true
		message = message[len(cmdc):]
		// } else if match, _ := regexp.MatchString(rex, lowerMessage); match {
	} else if strings.HasPrefix(lowerMessage, botnick) &&
		len(lowerMessage) > len(botnick) &&
		(lowerMessage[len(botnick)] == ',' || lowerMessage[len(botnick)] == ':') {

		iscmd = true
		message = message[len(botnick):]

		// trim off the customary addressing punctuation
		if message[0] == ':' || message[0] == ',' {
			message = message[1:]
		}
	}

	// trim off any whitespace left on the message
	message = strings.TrimSpace(message)

	return iscmd, message
}

// buildMessage builds our internal message type out of a client & message from irc
func (b *Bot) buildMessage(conn *irc.Client, inMsg irc.Msg) Message {
	// Check for the user
	user := b.GetUser(inMsg.Origin)

	channel := inMsg.Args[0]
	if channel == b.Config.Nick {
		// direct message: reply to the sender rather than to ourselves
		// (the original assigned inMsg.Args[0] back to itself, a no-op)
		channel = inMsg.Origin
	}

	isAction := false
	var message string
	if len(inMsg.Args) > 1 {
		message = inMsg.Args[1]

		isAction = strings.HasPrefix(message, actionPrefix)
		if isAction {
			message = strings.TrimRight(message[len(actionPrefix):], "\x01")
			message = strings.TrimSpace(message)
		}
	}

	iscmd := false
	filteredMessage := message
	if !isAction {
		iscmd, filteredMessage = b.isCmd(message)
	}

	msg := Message{
		User:    user,
		Channel: channel,
		Body:    filteredMessage,
		Raw:     message,
		Command: iscmd,
		Action:  isAction,
		Time:    time.Now(),
		Host:    inMsg.Host,
	}

	return msg
}

func (b *Bot) LastMessage(channel string) (Message, error) {
	log := <-b.logOut
	if len(log) == 0 {
		return Message{}, errors.New("No messages found.")
	}
	for i := len(log) - 1; i >= 0; i-- {
		msg := log[i]
		if strings.ToLower(msg.Channel) == strings.ToLower(channel) {
	}
	return Message{}, errors.New("No messages found.")
}

// Filter takes an input string and mutates it based on $vars in the string.
func (b *Bot) Filter(message Message, input string) string {
	rand.Seed(time.Now().Unix())

	if strings.Contains(input, "$NICK") {
		nick := strings.ToUpper(message.User.Name)
		input = strings.Replace(input, "$NICK", nick, -1)
	}

	// Let's be bucket compatible for this var
	input = strings.Replace(input, "$who", "$nick", -1)
	if strings.Contains(input, "$nick") {
		nick := message.User.Name
		input = strings.Replace(input, "$nick", nick, -1)
	}
	for strings.Contains(input, "$someone") {
		nicks := b.Who(message.Channel)
		someone := nicks[rand.Intn(len(nicks))].Name
		input = strings.Replace(input, "$someone", someone, 1)
	}
	for strings.Contains(input, "$digit") {
		// a digit is 0-9 (the original used Intn(9), which could never yield 9)
		num := strconv.Itoa(rand.Intn(10))
		input = strings.Replace(input, "$digit", num, 1)
	}
	for strings.Contains(input, "$nonzero") {
		// a nonzero digit is 1-9 (the original used Intn(8)+1, which capped at 8)
		num := strconv.Itoa(rand.Intn(9) + 1)
		input = strings.Replace(input, "$nonzero", num, 1)
	}

	// [A-Za-z] rather than [A-z]: the latter also matches the punctuation
	// between 'Z' and 'a' in ASCII ([, \, ], ^, _, and `)
	r, err := regexp.Compile(`\$[A-Za-z]+`)
	if err != nil {
		panic(err)
	}
	varname := r.FindString(input)
	blacklist := make(map[string]bool)
	blacklist["$and"] = true
	for len(varname) > 0 && !blacklist[varname] {
		var result []Variable
		b.varColl.Find(bson.M{"variable": varname}).All(&result)
		if len(result) == 0 {
			blacklist[varname] = true
			continue
		}
		variable := result[rand.Intn(len(result))]
		input = strings.Replace(input, varname, variable.Value, 1)
		varname = r.FindString(input)
	}

	return input
}

func (b *Bot) listVars(channel string, parts []string) {
	var result []string
	err := b.varColl.Find(bson.M{}).Distinct("variable", &result)
	if err != nil {
		panic(err)
	}
	msg := "I know: $who, $someone, $digit, $nonzero"
	for _, variable := range result {
		msg = fmt.Sprintf("%s, %s", msg, variable)
	}
	b.SendMessage(channel, msg)
}

func (b *Bot) Help(channel string, parts []string) {
	msg := fmt.Sprintf("Hi, I'm based on godeepintir version %s. I'm written in Go, and you "+
		"can find my source code on the internet here: "+
		"http://github.com/chrissexton/alepale", b.Version)
	b.SendMessage(channel, msg)
}

// selfSaid sends our own musings to the plugins
func (b *Bot) selfSaid(channel, message string, action bool) {
	msg := Message{
		User:    &b.Me, // hack
		Channel: channel,
		Body:    message,
		Raw:     message, // hack
		Action:  action,
		Command: false,
		Time:    time.Now(),
		Host:    "0.0.0.0", // hack
	}

	for _, name := range b.PluginOrdering {
		p := b.Plugins[name]
		if p.BotMessage(msg) {
			break
		}
	}
}
			return msg, nil
		}
cell_card_main.py
# -*- coding: utf-8 -*-
"""
Cell Card main module
"""

__author__ = 'Kanru Xie'

import globalvar as glv
from input_file_creator.write_cell_card import (objective_cell, air_cell, baseplate_cell,
                                                jaws_cell, mlc_cell)


def cell_card():
    c_card = ''
    mlc_state = glv.get_value('mlc state')
    if mlc_state == 'no mlc':
        c_card = str('c Cell card' + '\n'
                     + 'c Water tank phantom' + '\n'
                     + ' 1 1 -1.0 -1 2 imp:p,e 1 $water tank' + '\n'
                     + objective_cell.obj() + '\n'
                     + air_cell.air_card_1()
                     + 'c Jaws' + '\n'
                     + jaws_cell.jaws() + '\n'
                     + baseplate_cell.baseplate() + '\n'
                     + 'c Void' + '\n'
                     + ' 999 0 999 imp:p,e 0' + '\n'
                     + '\n')
    elif mlc_state == 'standard mlc':
        c_card = str('c Cell card' + '\n'
                     + 'c Water tank phantom' + '\n'
                     + ' 1 1 -1.0 -1 2 imp:p,e 1 $water tank' + '\n'
                     + objective_cell.obj() + '\n'
                     + air_cell.air_card_2()
                     + 'c Jaws' + '\n'
                     + jaws_cell.jaws() + '\n'
                     + baseplate_cell.baseplate() + '\n'
                     + mlc_cell.mlc_card()
                     + 'c Void' + '\n'
                     + ' 999 0 999 imp:p,e 0' + '\n'
                     + '\n')
    return c_card